org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos
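The listing below is the protoc-generated Java source for the HDFS client-to-NameNode protocol messages, as shaded into the prestosql-hadoop runtime (hence the io.prestosql.hadoop.$internal prefix on the protobuf classes). As a minimal usage sketch, assuming this jar and its shaded protobuf 2.5 runtime are on the classpath, a GetBlockLocationsRequestProto can be built, serialized, and parsed back as follows; the class name GetBlockLocationsExample, the path, and the range values are illustrative, while every method called appears in the generated code below:

    import io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException;
    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto;

    public class GetBlockLocationsExample {
      public static void main(String[] args) throws InvalidProtocolBufferException {
        // All three fields are declared "required"; build() throws if any is unset.
        GetBlockLocationsRequestProto req = GetBlockLocationsRequestProto.newBuilder()
            .setSrc("/user/example/data.txt")  // illustrative HDFS path
            .setOffset(0L)                     // range start offset
            .setLength(1024L * 1024L)          // range length in bytes
            .build();

        byte[] wire = req.toByteArray();       // protobuf wire format
        GetBlockLocationsRequestProto parsed = GetBlockLocationsRequestProto.parseFrom(wire);
        System.out.println(parsed.getSrc() + " " + parsed.getOffset() + " " + parsed.getLength());
      }
    }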
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: ClientNamenodeProtocol.proto

package org.apache.hadoop.hdfs.protocol.proto;

public final class ClientNamenodeProtocolProtos {
  private ClientNamenodeProtocolProtos() {}
  public static void registerAllExtensions(
      io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistry registry) {
  }
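
  // Editor's note (not part of the generated file): a minimal sketch of how the
  // enums below map to their wire numbers, using only members generated in this
  // file. valueOf(int) returns null for numbers that are not defined:
  //
  //   CreateFlagProto flag = CreateFlagProto.valueOf(CreateFlagProto.OVERWRITE_VALUE);
  //   assert flag == CreateFlagProto.OVERWRITE;    // wire number 2
  //   assert flag.getNumber() == 2;
  //   assert CreateFlagProto.valueOf(99) == null;  // unknown wire number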
  /**
   * Protobuf enum {@code hadoop.hdfs.CreateFlagProto}
   */
  public enum CreateFlagProto
      implements io.prestosql.hadoop.$internal.com.google.protobuf.ProtocolMessageEnum {
    /**
     * CREATE = 1;
     *
     * 
     * Create a file
     * 
     */
    CREATE(0, 1),
    /**
     * OVERWRITE = 2;
     *
     * Truncate/overwrite a file. Same as POSIX O_TRUNC
     * 
     */
    OVERWRITE(1, 2),
    /**
     * APPEND = 4;
     *
     * Append to a file
     * 
     */
    APPEND(2, 4),
    /**
     * LAZY_PERSIST = 16;
     *
     * File with reduced durability guarantees.
     * 
     */
    LAZY_PERSIST(3, 16),
    /**
     * NEW_BLOCK = 32;
     *
     * Write data to a new block when appending
     * 
     */
    NEW_BLOCK(4, 32),
    /**
     * SHOULD_REPLICATE = 128;
     *
     * Enforce to create a replicate file
     * 
     */
    SHOULD_REPLICATE(5, 128),
    ;

    /**
     * CREATE = 1;
     *
     * Create a file
     * 
     */
    public static final int CREATE_VALUE = 1;
    /**
     * OVERWRITE = 2;
     *
     * Truncate/overwrite a file. Same as POSIX O_TRUNC
     * 
     */
    public static final int OVERWRITE_VALUE = 2;
    /**
     * APPEND = 4;
     *
     * Append to a file
     * 
     */
    public static final int APPEND_VALUE = 4;
    /**
     * LAZY_PERSIST = 16;
     *
     * File with reduced durability guarantees.
     * 
     */
    public static final int LAZY_PERSIST_VALUE = 16;
    /**
     * NEW_BLOCK = 32;
     *
     * Write data to a new block when appending
     * 
     */
    public static final int NEW_BLOCK_VALUE = 32;
    /**
     * SHOULD_REPLICATE = 128;
     *
     * Enforce to create a replicate file
     * 
*/ public static final int SHOULD_REPLICATE_VALUE = 128; public final int getNumber() { return value; } public static CreateFlagProto valueOf(int value) { switch (value) { case 1: return CREATE; case 2: return OVERWRITE; case 4: return APPEND; case 16: return LAZY_PERSIST; case 32: return NEW_BLOCK; case 128: return SHOULD_REPLICATE; default: return null; } } public static io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap internalValueMap = new io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap() { public CreateFlagProto findValueByNumber(int number) { return CreateFlagProto.valueOf(number); } }; public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(index); } public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.getDescriptor().getEnumTypes().get(0); } private static final CreateFlagProto[] VALUES = values(); public static CreateFlagProto valueOf( io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int index; private final int value; private CreateFlagProto(int index, int value) { this.index = index; this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.CreateFlagProto) } /** * Protobuf enum {@code hadoop.hdfs.AddBlockFlagProto} */ public enum AddBlockFlagProto implements io.prestosql.hadoop.$internal.com.google.protobuf.ProtocolMessageEnum { /** * NO_LOCAL_WRITE = 1; * *
     * avoid writing to local node.
     * 
     */
    NO_LOCAL_WRITE(0, 1),
    /**
     * IGNORE_CLIENT_LOCALITY = 2;
     *
     * write to a random node
     * 
     */
    IGNORE_CLIENT_LOCALITY(1, 2),
    ;

    /**
     * NO_LOCAL_WRITE = 1;
     *
     * avoid writing to local node.
     * 
     */
    public static final int NO_LOCAL_WRITE_VALUE = 1;
    /**
     * IGNORE_CLIENT_LOCALITY = 2;
     *
     * write to a random node
     * 
*/ public static final int IGNORE_CLIENT_LOCALITY_VALUE = 2; public final int getNumber() { return value; } public static AddBlockFlagProto valueOf(int value) { switch (value) { case 1: return NO_LOCAL_WRITE; case 2: return IGNORE_CLIENT_LOCALITY; default: return null; } } public static io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap internalValueMap = new io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap() { public AddBlockFlagProto findValueByNumber(int number) { return AddBlockFlagProto.valueOf(number); } }; public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(index); } public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.getDescriptor().getEnumTypes().get(1); } private static final AddBlockFlagProto[] VALUES = values(); public static AddBlockFlagProto valueOf( io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int index; private final int value; private AddBlockFlagProto(int index, int value) { this.index = index; this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.AddBlockFlagProto) } /** * Protobuf enum {@code hadoop.hdfs.DatanodeReportTypeProto} * *
   * type of the datanode report
   * 
*/ public enum DatanodeReportTypeProto implements io.prestosql.hadoop.$internal.com.google.protobuf.ProtocolMessageEnum { /** * ALL = 1; */ ALL(0, 1), /** * LIVE = 2; */ LIVE(1, 2), /** * DEAD = 3; */ DEAD(2, 3), /** * DECOMMISSIONING = 4; */ DECOMMISSIONING(3, 4), /** * ENTERING_MAINTENANCE = 5; */ ENTERING_MAINTENANCE(4, 5), /** * IN_MAINTENANCE = 6; */ IN_MAINTENANCE(5, 6), ; /** * ALL = 1; */ public static final int ALL_VALUE = 1; /** * LIVE = 2; */ public static final int LIVE_VALUE = 2; /** * DEAD = 3; */ public static final int DEAD_VALUE = 3; /** * DECOMMISSIONING = 4; */ public static final int DECOMMISSIONING_VALUE = 4; /** * ENTERING_MAINTENANCE = 5; */ public static final int ENTERING_MAINTENANCE_VALUE = 5; /** * IN_MAINTENANCE = 6; */ public static final int IN_MAINTENANCE_VALUE = 6; public final int getNumber() { return value; } public static DatanodeReportTypeProto valueOf(int value) { switch (value) { case 1: return ALL; case 2: return LIVE; case 3: return DEAD; case 4: return DECOMMISSIONING; case 5: return ENTERING_MAINTENANCE; case 6: return IN_MAINTENANCE; default: return null; } } public static io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap internalValueMap = new io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap() { public DatanodeReportTypeProto findValueByNumber(int number) { return DatanodeReportTypeProto.valueOf(number); } }; public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(index); } public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.getDescriptor().getEnumTypes().get(2); } private static final DatanodeReportTypeProto[] VALUES = values(); public static DatanodeReportTypeProto valueOf( io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int index; private final int value; private DatanodeReportTypeProto(int index, int value) { this.index = index; this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.DatanodeReportTypeProto) } /** * Protobuf enum {@code hadoop.hdfs.SafeModeActionProto} */ public enum SafeModeActionProto implements io.prestosql.hadoop.$internal.com.google.protobuf.ProtocolMessageEnum { /** * SAFEMODE_LEAVE = 1; */ SAFEMODE_LEAVE(0, 1), /** * SAFEMODE_ENTER = 2; */ SAFEMODE_ENTER(1, 2), /** * SAFEMODE_GET = 3; */ SAFEMODE_GET(2, 3), /** * SAFEMODE_FORCE_EXIT = 4; */ SAFEMODE_FORCE_EXIT(3, 4), ; /** * SAFEMODE_LEAVE = 1; */ public static final int SAFEMODE_LEAVE_VALUE = 1; /** * SAFEMODE_ENTER = 2; */ public static final int SAFEMODE_ENTER_VALUE = 2; /** * SAFEMODE_GET = 3; */ public static final int SAFEMODE_GET_VALUE = 3; /** * SAFEMODE_FORCE_EXIT = 4; */ public static final int SAFEMODE_FORCE_EXIT_VALUE = 4; public final int getNumber() { return value; } public static SafeModeActionProto valueOf(int value) { switch (value) { case 1: return SAFEMODE_LEAVE; 
case 2: return SAFEMODE_ENTER; case 3: return SAFEMODE_GET; case 4: return SAFEMODE_FORCE_EXIT; default: return null; } } public static io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap internalValueMap = new io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap() { public SafeModeActionProto findValueByNumber(int number) { return SafeModeActionProto.valueOf(number); } }; public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(index); } public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.getDescriptor().getEnumTypes().get(3); } private static final SafeModeActionProto[] VALUES = values(); public static SafeModeActionProto valueOf( io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int index; private final int value; private SafeModeActionProto(int index, int value) { this.index = index; this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.SafeModeActionProto) } /** * Protobuf enum {@code hadoop.hdfs.RollingUpgradeActionProto} */ public enum RollingUpgradeActionProto implements io.prestosql.hadoop.$internal.com.google.protobuf.ProtocolMessageEnum { /** * QUERY = 1; */ QUERY(0, 1), /** * START = 2; */ START(1, 2), /** * FINALIZE = 3; */ FINALIZE(2, 3), ; /** * QUERY = 1; */ public static final int QUERY_VALUE = 1; /** * START = 2; */ public static final int START_VALUE = 2; /** * FINALIZE = 3; */ public static final int FINALIZE_VALUE = 3; public final int getNumber() { return value; } public static RollingUpgradeActionProto valueOf(int value) { switch (value) { case 1: return QUERY; case 2: return START; case 3: return FINALIZE; default: return null; } } public static io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap internalValueMap = new io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap() { public RollingUpgradeActionProto findValueByNumber(int number) { return RollingUpgradeActionProto.valueOf(number); } }; public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(index); } public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.getDescriptor().getEnumTypes().get(4); } private static final RollingUpgradeActionProto[] VALUES = values(); public static RollingUpgradeActionProto valueOf( io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumValueDescriptor 
desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int index; private final int value; private RollingUpgradeActionProto(int index, int value) { this.index = index; this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.RollingUpgradeActionProto) } /** * Protobuf enum {@code hadoop.hdfs.CacheFlagProto} */ public enum CacheFlagProto implements io.prestosql.hadoop.$internal.com.google.protobuf.ProtocolMessageEnum { /** * FORCE = 1; * *
     * Ignore pool resource limits
     * 
     */
    FORCE(0, 1),
    ;

    /**
     * FORCE = 1;
     *
     * Ignore pool resource limits
     * 
*/ public static final int FORCE_VALUE = 1; public final int getNumber() { return value; } public static CacheFlagProto valueOf(int value) { switch (value) { case 1: return FORCE; default: return null; } } public static io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap internalValueMap = new io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap() { public CacheFlagProto findValueByNumber(int number) { return CacheFlagProto.valueOf(number); } }; public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(index); } public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.getDescriptor().getEnumTypes().get(5); } private static final CacheFlagProto[] VALUES = values(); public static CacheFlagProto valueOf( io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int index; private final int value; private CacheFlagProto(int index, int value) { this.index = index; this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.CacheFlagProto) } /** * Protobuf enum {@code hadoop.hdfs.OpenFilesTypeProto} */ public enum OpenFilesTypeProto implements io.prestosql.hadoop.$internal.com.google.protobuf.ProtocolMessageEnum { /** * ALL_OPEN_FILES = 1; */ ALL_OPEN_FILES(0, 1), /** * BLOCKING_DECOMMISSION = 2; */ BLOCKING_DECOMMISSION(1, 2), ; /** * ALL_OPEN_FILES = 1; */ public static final int ALL_OPEN_FILES_VALUE = 1; /** * BLOCKING_DECOMMISSION = 2; */ public static final int BLOCKING_DECOMMISSION_VALUE = 2; public final int getNumber() { return value; } public static OpenFilesTypeProto valueOf(int value) { switch (value) { case 1: return ALL_OPEN_FILES; case 2: return BLOCKING_DECOMMISSION; default: return null; } } public static io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap internalValueMap = new io.prestosql.hadoop.$internal.com.google.protobuf.Internal.EnumLiteMap() { public OpenFilesTypeProto findValueByNumber(int number) { return OpenFilesTypeProto.valueOf(number); } }; public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(index); } public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.getDescriptor().getEnumTypes().get(6); } private static final OpenFilesTypeProto[] VALUES = values(); public static OpenFilesTypeProto valueOf( io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.EnumValueDescriptor 
desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int index; private final int value; private OpenFilesTypeProto(int index, int value) { this.index = index; this.value = value; } // @@protoc_insertion_point(enum_scope:hadoop.hdfs.OpenFilesTypeProto) } public interface GetBlockLocationsRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; * *
     * file name
     * 
     */
    boolean hasSrc();
    /**
     * required string src = 1;
     *
     * file name
     * 
     */
    java.lang.String getSrc();
    /**
     * required string src = 1;
     *
     * file name
     * 
     */
    io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
        getSrcBytes();

    // required uint64 offset = 2;
    /**
     * required uint64 offset = 2;
     *
     * range start offset
     * 
     */
    boolean hasOffset();
    /**
     * required uint64 offset = 2;
     *
     * range start offset
     * 
     */
    long getOffset();

    // required uint64 length = 3;
    /**
     * required uint64 length = 3;
     *
     * range length
     * 
     */
    boolean hasLength();
    /**
     * required uint64 length = 3;
     *
     * range length
     * 
*/ long getLength(); } /** * Protobuf type {@code hadoop.hdfs.GetBlockLocationsRequestProto} */ public static final class GetBlockLocationsRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetBlockLocationsRequestProtoOrBuilder { // Use GetBlockLocationsRequestProto.newBuilder() to construct. private GetBlockLocationsRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetBlockLocationsRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetBlockLocationsRequestProto defaultInstance; public static GetBlockLocationsRequestProto getDefaultInstance() { return defaultInstance; } public GetBlockLocationsRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetBlockLocationsRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } case 16: { bitField0_ |= 0x00000002; offset_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; length_ = input.readUInt64(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocationsRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocationsRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetBlockLocationsRequestProto parsePartialFrom( 
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetBlockLocationsRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string src = 1; public static final int SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * required string src = 1; * *
     * file name
     * 
     */
    public boolean hasSrc() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * required string src = 1;
     *
     * file name
     * 
     */
    public java.lang.String getSrc() {
      java.lang.Object ref = src_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs =
            (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          src_ = s;
        }
        return s;
      }
    }
    /**
     * required string src = 1;
     *
     * file name
     * 
     */
    public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
        getSrcBytes() {
      java.lang.Object ref = src_;
      if (ref instanceof java.lang.String) {
        io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b =
            io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        src_ = b;
        return b;
      } else {
        return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
      }
    }

    // required uint64 offset = 2;
    public static final int OFFSET_FIELD_NUMBER = 2;
    private long offset_;
    /**
     * required uint64 offset = 2;
     *
     * range start offset
     * 
     */
    public boolean hasOffset() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * required uint64 offset = 2;
     *
     * range start offset
     * 
     */
    public long getOffset() {
      return offset_;
    }

    // required uint64 length = 3;
    public static final int LENGTH_FIELD_NUMBER = 3;
    private long length_;
    /**
     * required uint64 length = 3;
     *
     * range length
     * 
     */
    public boolean hasLength() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * required uint64 length = 3;
     *
     * range length
     * 
*/ public long getLength() { return length_; } private void initFields() { src_ = ""; offset_ = 0L; length_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } if (!hasOffset()) { memoizedIsInitialized = 0; return false; } if (!hasLength()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, offset_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(3, length_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(2, offset_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(3, length_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && (hasOffset() == other.hasOffset()); if (hasOffset()) { result = result && (getOffset() == other.getOffset()); } result = result && (hasLength() == other.hasLength()); if (hasLength()) { result = result && (getLength() == other.getLength()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } if (hasOffset()) { hash = (37 * hash) + OFFSET_FIELD_NUMBER; hash = (53 * hash) + hashLong(getOffset()); } if (hasLength()) { hash = (37 * hash) + LENGTH_FIELD_NUMBER; hash = (53 * hash) + hashLong(getLength()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto parseFrom( 
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf 
type {@code hadoop.hdfs.GetBlockLocationsRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocationsRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocationsRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); offset_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); length_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocationsRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.offset_ = offset_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.length_ = length_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } 
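      // Editor's note (not part of the generated file): build() above validates
      // the required src/offset/length fields via isInitialized(), while
      // buildPartial() skips that check. A sketch of deriving a follow-up
      // request from an existing one (req is illustrative):
      //
      //   GetBlockLocationsRequestProto next =
      //       req.toBuilder().setOffset(req.getOffset() + req.getLength()).build();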
public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } if (other.hasOffset()) { setOffset(other.getOffset()); } if (other.hasLength()) { setLength(other.getLength()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } if (!hasOffset()) { return false; } if (!hasLength()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; * *
       * file name
       * 
       */
      public boolean hasSrc() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * required string src = 1;
       *
       * file name
       * 
       */
      public java.lang.String getSrc() {
        java.lang.Object ref = src_;
        if (!(ref instanceof java.lang.String)) {
          java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref)
              .toStringUtf8();
          src_ = s;
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * required string src = 1;
       *
       * file name
       * 
       */
      public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString
          getSrcBytes() {
        java.lang.Object ref = src_;
        if (ref instanceof String) {
          io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b =
              io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          src_ = b;
          return b;
        } else {
          return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref;
        }
      }
      /**
       * required string src = 1;
       *
       * file name
       * 
       */
      public Builder setSrc(
          java.lang.String value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        src_ = value;
        onChanged();
        return this;
      }
      /**
       * required string src = 1;
       *
       * file name
       * 
       */
      public Builder clearSrc() {
        bitField0_ = (bitField0_ & ~0x00000001);
        src_ = getDefaultInstance().getSrc();
        onChanged();
        return this;
      }
      /**
       * required string src = 1;
       *
       * file name
       * 
       */
      public Builder setSrcBytes(
          io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        src_ = value;
        onChanged();
        return this;
      }

      // required uint64 offset = 2;
      private long offset_ ;
      /**
       * required uint64 offset = 2;
       *
       * range start offset
       * 
       */
      public boolean hasOffset() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * required uint64 offset = 2;
       *
       * range start offset
       * 
       */
      public long getOffset() {
        return offset_;
      }
      /**
       * required uint64 offset = 2;
       *
       * range start offset
       * 
       */
      public Builder setOffset(long value) {
        bitField0_ |= 0x00000002;
        offset_ = value;
        onChanged();
        return this;
      }
      /**
       * required uint64 offset = 2;
       *
       * range start offset
       * 
       */
      public Builder clearOffset() {
        bitField0_ = (bitField0_ & ~0x00000002);
        offset_ = 0L;
        onChanged();
        return this;
      }

      // required uint64 length = 3;
      private long length_ ;
      /**
       * required uint64 length = 3;
       *
       * range length
       * 
       */
      public boolean hasLength() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      /**
       * required uint64 length = 3;
       *
       * range length
       * 
       */
      public long getLength() {
        return length_;
      }
      /**
       * required uint64 length = 3;
       *
       * range length
       * 
       */
      public Builder setLength(long value) {
        bitField0_ |= 0x00000004;
        length_ = value;
        onChanged();
        return this;
      }
      /**
       * required uint64 length = 3;
       *
       * range length
       * 
*/ public Builder clearLength() { bitField0_ = (bitField0_ & ~0x00000004); length_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetBlockLocationsRequestProto) } static { defaultInstance = new GetBlockLocationsRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetBlockLocationsRequestProto) } public interface GetBlockLocationsResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // optional .hadoop.hdfs.LocatedBlocksProto locations = 1; /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 1; */ boolean hasLocations(); /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations(); /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.GetBlockLocationsResponseProto} */ public static final class GetBlockLocationsResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetBlockLocationsResponseProtoOrBuilder { // Use GetBlockLocationsResponseProto.newBuilder() to construct. private GetBlockLocationsResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetBlockLocationsResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetBlockLocationsResponseProto defaultInstance; public static GetBlockLocationsResponseProto getDefaultInstance() { return defaultInstance; } public GetBlockLocationsResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetBlockLocationsResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = locations_.toBuilder(); } locations_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(locations_); locations_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new 
io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocationsResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocationsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetBlockLocationsResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetBlockLocationsResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // optional .hadoop.hdfs.LocatedBlocksProto locations = 1; public static final int LOCATIONS_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto locations_; /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 1; */ public boolean hasLocations() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations() { return locations_; } /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder() { return locations_; } private void initFields() { locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (hasLocations()) { if (!getLocations().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, locations_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, locations_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private 
static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto) obj; boolean result = true; result = result && (hasLocations() == other.hasLocations()); if (hasLocations()) { result = result && getLocations() .equals(other.getLocations()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasLocations()) { hash = (37 * hash) + LOCATIONS_FIELD_NUMBER; hash = (53 * hash) + getLocations().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetBlockLocationsResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocationsResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocationsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getLocationsFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (locationsBuilder_ == null) { locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance(); } else { locationsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return 
create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocationsResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (locationsBuilder_ == null) { result.locations_ = locations_; } else { result.locations_ = locationsBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.getDefaultInstance()) return this; if (other.hasLocations()) { mergeLocations(other.getLocations()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (hasLocations()) { if (!getLocations().isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // optional .hadoop.hdfs.LocatedBlocksProto locations = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance(); private 
io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder> locationsBuilder_; /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 1; */ public boolean hasLocations() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations() { if (locationsBuilder_ == null) { return locations_; } else { return locationsBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 1; */ public Builder setLocations(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto value) { if (locationsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } locations_ = value; onChanged(); } else { locationsBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 1; */ public Builder setLocations( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder builderForValue) { if (locationsBuilder_ == null) { locations_ = builderForValue.build(); onChanged(); } else { locationsBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 1; */ public Builder mergeLocations(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto value) { if (locationsBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && locations_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance()) { locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.newBuilder(locations_).mergeFrom(value).buildPartial(); } else { locations_ = value; } onChanged(); } else { locationsBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 1; */ public Builder clearLocations() { if (locationsBuilder_ == null) { locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance(); onChanged(); } else { locationsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder getLocationsBuilder() { bitField0_ |= 0x00000001; onChanged(); return getLocationsFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder() { if (locationsBuilder_ != null) { return locationsBuilder_.getMessageOrBuilder(); } else { return locations_; } } /** * optional .hadoop.hdfs.LocatedBlocksProto locations = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder> getLocationsFieldBuilder() { if (locationsBuilder_ == null) { locationsBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder>( locations_, getParentForChildren(), isClean()); locations_ = null; } return locationsBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetBlockLocationsResponseProto) } static { defaultInstance = new GetBlockLocationsResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetBlockLocationsResponseProto) } public interface GetServerDefaultsRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.GetServerDefaultsRequestProto} * *
   * <pre>
   * No parameters
   * </pre>
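   *
   * A hand-written usage sketch (not part of the generated source): because this
   * request carries no fields, callers can simply reuse the shared default
   * instance rather than allocating a new message.
   * <pre>
   * GetServerDefaultsRequestProto req =
   *     GetServerDefaultsRequestProto.getDefaultInstance();
   * </pre>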
*/ public static final class GetServerDefaultsRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetServerDefaultsRequestProtoOrBuilder { // Use GetServerDefaultsRequestProto.newBuilder() to construct. private GetServerDefaultsRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetServerDefaultsRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetServerDefaultsRequestProto defaultInstance; public static GetServerDefaultsRequestProto getDefaultInstance() { return defaultInstance; } public GetServerDefaultsRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetServerDefaultsRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetServerDefaultsRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetServerDefaultsRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetServerDefaultsRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetServerDefaultsRequestProto(input, extensionRegistry); } }; @java.lang.Override public 
io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto parseFrom( java.io.InputStream input, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetServerDefaultsRequestProto} * *
     * <pre>
     * No parameters
     * </pre>
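     *
     * A hand-written sketch (not generated): going through the Builder produces
     * an equivalent empty request.
     * <pre>
     * GetServerDefaultsRequestProto req =
     *     GetServerDefaultsRequestProto.newBuilder().build();
     * </pre>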
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetServerDefaultsRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetServerDefaultsRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetServerDefaultsRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto other) { if (other == 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetServerDefaultsRequestProto) } static { defaultInstance = new GetServerDefaultsRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetServerDefaultsRequestProto) } public interface GetServerDefaultsResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.FsServerDefaultsProto serverDefaults = 1; /** * required .hadoop.hdfs.FsServerDefaultsProto serverDefaults = 1; */ boolean hasServerDefaults(); /** * required .hadoop.hdfs.FsServerDefaultsProto serverDefaults = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto getServerDefaults(); /** * required .hadoop.hdfs.FsServerDefaultsProto serverDefaults = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProtoOrBuilder getServerDefaultsOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.GetServerDefaultsResponseProto} */ public static final class GetServerDefaultsResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetServerDefaultsResponseProtoOrBuilder { // Use GetServerDefaultsResponseProto.newBuilder() to construct. 
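  // Hand-written usage sketch (not generated code): a typical serialize/parse
  // round trip for this message, assuming a populated FsServerDefaultsProto
  // named "defaults" obtained elsewhere.
  //
  //   GetServerDefaultsResponseProto resp = GetServerDefaultsResponseProto.newBuilder()
  //       .setServerDefaults(defaults)  // required field; build() throws if unset
  //       .build();
  //   byte[] wire = resp.toByteArray();
  //   GetServerDefaultsResponseProto parsed = GetServerDefaultsResponseProto.parseFrom(wire);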
private GetServerDefaultsResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetServerDefaultsResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetServerDefaultsResponseProto defaultInstance; public static GetServerDefaultsResponseProto getDefaultInstance() { return defaultInstance; } public GetServerDefaultsResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetServerDefaultsResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = serverDefaults_.toBuilder(); } serverDefaults_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(serverDefaults_); serverDefaults_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetServerDefaultsResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetServerDefaultsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetServerDefaultsResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetServerDefaultsResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.FsServerDefaultsProto serverDefaults = 1; public static final int SERVERDEFAULTS_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto serverDefaults_; /** * required .hadoop.hdfs.FsServerDefaultsProto serverDefaults = 1; */ public boolean hasServerDefaults() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.FsServerDefaultsProto serverDefaults = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto getServerDefaults() { return serverDefaults_; } /** * required .hadoop.hdfs.FsServerDefaultsProto serverDefaults = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProtoOrBuilder getServerDefaultsOrBuilder() { return serverDefaults_; } private void initFields() { serverDefaults_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasServerDefaults()) { memoizedIsInitialized = 0; return false; } if (!getServerDefaults().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, serverDefaults_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, serverDefaults_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto) obj; boolean result = true; result = result && (hasServerDefaults() == other.hasServerDefaults()); if (hasServerDefaults()) { result = result && getServerDefaults() .equals(other.getServerDefaults()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if 
(hasServerDefaults()) { hash = (37 * hash) + SERVERDEFAULTS_FIELD_NUMBER; hash = (53 * hash) + getServerDefaults().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder 
newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetServerDefaultsResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetServerDefaultsResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetServerDefaultsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getServerDefaultsFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (serverDefaultsBuilder_ == null) { serverDefaults_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance(); } else { serverDefaultsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetServerDefaultsResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto 
result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (serverDefaultsBuilder_ == null) { result.serverDefaults_ = serverDefaults_; } else { result.serverDefaults_ = serverDefaultsBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.getDefaultInstance()) return this; if (other.hasServerDefaults()) { mergeServerDefaults(other.getServerDefaults()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasServerDefaults()) { return false; } if (!getServerDefaults().isInitialized()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.FsServerDefaultsProto serverDefaults = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto serverDefaults_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProtoOrBuilder> serverDefaultsBuilder_; /** * required .hadoop.hdfs.FsServerDefaultsProto serverDefaults = 1; */ public boolean hasServerDefaults() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.FsServerDefaultsProto serverDefaults = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto getServerDefaults() { if (serverDefaultsBuilder_ == null) { return serverDefaults_; } else { return serverDefaultsBuilder_.getMessage(); } } /** * required .hadoop.hdfs.FsServerDefaultsProto serverDefaults = 1; */ public Builder setServerDefaults(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto value) { if (serverDefaultsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } 
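        // When no nested builder exists, the message is stored directly in
        // serverDefaults_; otherwise the SingleFieldBuilder keeps parent and child
        // builders in sync and the value is handed to it in the else branch below.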
serverDefaults_ = value; onChanged(); } else { serverDefaultsBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.FsServerDefaultsProto serverDefaults = 1; */ public Builder setServerDefaults( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder builderForValue) { if (serverDefaultsBuilder_ == null) { serverDefaults_ = builderForValue.build(); onChanged(); } else { serverDefaultsBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.FsServerDefaultsProto serverDefaults = 1; */ public Builder mergeServerDefaults(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto value) { if (serverDefaultsBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && serverDefaults_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance()) { serverDefaults_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.newBuilder(serverDefaults_).mergeFrom(value).buildPartial(); } else { serverDefaults_ = value; } onChanged(); } else { serverDefaultsBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.FsServerDefaultsProto serverDefaults = 1; */ public Builder clearServerDefaults() { if (serverDefaultsBuilder_ == null) { serverDefaults_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance(); onChanged(); } else { serverDefaultsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.FsServerDefaultsProto serverDefaults = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder getServerDefaultsBuilder() { bitField0_ |= 0x00000001; onChanged(); return getServerDefaultsFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.FsServerDefaultsProto serverDefaults = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProtoOrBuilder getServerDefaultsOrBuilder() { if (serverDefaultsBuilder_ != null) { return serverDefaultsBuilder_.getMessageOrBuilder(); } else { return serverDefaults_; } } /** * required .hadoop.hdfs.FsServerDefaultsProto serverDefaults = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProtoOrBuilder> getServerDefaultsFieldBuilder() { if (serverDefaultsBuilder_ == null) { serverDefaultsBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProtoOrBuilder>( serverDefaults_, getParentForChildren(), isClean()); serverDefaults_ = null; } return serverDefaultsBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetServerDefaultsResponseProto) } static { defaultInstance = new GetServerDefaultsResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetServerDefaultsResponseProto) } public interface CreateRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; 
*/ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); // required .hadoop.hdfs.FsPermissionProto masked = 2; /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ boolean hasMasked(); /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getMasked(); /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getMaskedOrBuilder(); // required string clientName = 3; /** * required string clientName = 3; */ boolean hasClientName(); /** * required string clientName = 3; */ java.lang.String getClientName(); /** * required string clientName = 3; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes(); // required uint32 createFlag = 4; /** * required uint32 createFlag = 4; * *
     * <pre>
     * bits set using CreateFlag
     * </pre>
*/ boolean hasCreateFlag(); /** * required uint32 createFlag = 4; * *
     * <pre>
     * bits set using CreateFlag
     * </pre>
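     *
     * A hand-written sketch (not generated): the value is a bitwise OR of
     * CreateFlagProto numeric values, for example:
     * <pre>
     * int createFlag = CreateFlagProto.CREATE_VALUE | CreateFlagProto.OVERWRITE_VALUE;
     * </pre>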
*/ int getCreateFlag(); // required bool createParent = 5; /** * required bool createParent = 5; */ boolean hasCreateParent(); /** * required bool createParent = 5; */ boolean getCreateParent(); // required uint32 replication = 6; /** * required uint32 replication = 6; * *
     * <pre>
     * Short: Only 16 bits used
     * </pre>
*/ boolean hasReplication(); /** * required uint32 replication = 6; * *
     * <pre>
     * Short: Only 16 bits used
     * </pre>
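     *
     * A hand-written sketch (not generated): the field travels as a uint32 on the
     * wire but is expected to fit in 16 bits, so it maps safely to a Java short
     * (the receiver "proto" here is hypothetical):
     * <pre>
     * short replication = (short) proto.getReplication();
     * </pre>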
*/ int getReplication(); // required uint64 blockSize = 7; /** * required uint64 blockSize = 7; */ boolean hasBlockSize(); /** * required uint64 blockSize = 7; */ long getBlockSize(); // repeated .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 8; /** * repeated .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 8; */ java.util.List getCryptoProtocolVersionList(); /** * repeated .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 8; */ int getCryptoProtocolVersionCount(); /** * repeated .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 8; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion(int index); // optional .hadoop.hdfs.FsPermissionProto unmasked = 9; /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 9; */ boolean hasUnmasked(); /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 9; */ org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getUnmasked(); /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 9; */ org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getUnmaskedOrBuilder(); // optional string ecPolicyName = 10; /** * optional string ecPolicyName = 10; */ boolean hasEcPolicyName(); /** * optional string ecPolicyName = 10; */ java.lang.String getEcPolicyName(); /** * optional string ecPolicyName = 10; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getEcPolicyNameBytes(); } /** * Protobuf type {@code hadoop.hdfs.CreateRequestProto} */ public static final class CreateRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements CreateRequestProtoOrBuilder { // Use CreateRequestProto.newBuilder() to construct. private CreateRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private CreateRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final CreateRequestProto defaultInstance; public static CreateRequestProto getDefaultInstance() { return defaultInstance; } public CreateRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CreateRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } case 18: { org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = masked_.toBuilder(); } masked_ = 
input.readMessage(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(masked_); masked_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000002; break; } case 26: { bitField0_ |= 0x00000004; clientName_ = input.readBytes(); break; } case 32: { bitField0_ |= 0x00000008; createFlag_ = input.readUInt32(); break; } case 40: { bitField0_ |= 0x00000010; createParent_ = input.readBool(); break; } case 48: { bitField0_ |= 0x00000020; replication_ = input.readUInt32(); break; } case 56: { bitField0_ |= 0x00000040; blockSize_ = input.readUInt64(); break; } case 64: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(8, rawValue); } else { if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) { cryptoProtocolVersion_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000080; } cryptoProtocolVersion_.add(value); } break; } case 66: { int length = input.readRawVarint32(); int oldLimit = input.pushLimit(length); while(input.getBytesUntilLimit() > 0) { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(8, rawValue); } else { if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) { cryptoProtocolVersion_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000080; } cryptoProtocolVersion_.add(value); } } input.popLimit(oldLimit); break; } case 74: { org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder subBuilder = null; if (((bitField0_ & 0x00000080) == 0x00000080)) { subBuilder = unmasked_.toBuilder(); } unmasked_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(unmasked_); unmasked_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000080; break; } case 82: { bitField0_ |= 0x00000100; ecPolicyName_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) { cryptoProtocolVersion_ = java.util.Collections.unmodifiableList(cryptoProtocolVersion_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto.class, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public CreateRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new CreateRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string src = 1; public static final int SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required .hadoop.hdfs.FsPermissionProto masked = 2; public static final int MASKED_FIELD_NUMBER = 2; private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto masked_; /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ public boolean hasMasked() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getMasked() { return masked_; } /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getMaskedOrBuilder() { return masked_; } // required string clientName = 3; public static final int CLIENTNAME_FIELD_NUMBER = 3; private java.lang.Object clientName_; /** * required string clientName = 3; */ public boolean hasClientName() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string clientName = 3; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { clientName_ = s; } return s; } } /** * required string clientName = 3; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( 
(java.lang.String) ref); clientName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required uint32 createFlag = 4; public static final int CREATEFLAG_FIELD_NUMBER = 4; private int createFlag_; /** * required uint32 createFlag = 4; * *
     * <pre>
     * bits set using CreateFlag
     * </pre>
*/ public boolean hasCreateFlag() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required uint32 createFlag = 4; * *
     * <pre>
     * bits set using CreateFlag
     * </pre>
*/ public int getCreateFlag() { return createFlag_; } // required bool createParent = 5; public static final int CREATEPARENT_FIELD_NUMBER = 5; private boolean createParent_; /** * required bool createParent = 5; */ public boolean hasCreateParent() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required bool createParent = 5; */ public boolean getCreateParent() { return createParent_; } // required uint32 replication = 6; public static final int REPLICATION_FIELD_NUMBER = 6; private int replication_; /** * required uint32 replication = 6; * *
     * <pre>
     * Short: Only 16 bits used
     * </pre>
*/ public boolean hasReplication() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * required uint32 replication = 6; * *
     * <pre>
     * Short: Only 16 bits used
     * </pre>
*/ public int getReplication() { return replication_; } // required uint64 blockSize = 7; public static final int BLOCKSIZE_FIELD_NUMBER = 7; private long blockSize_; /** * required uint64 blockSize = 7; */ public boolean hasBlockSize() { return ((bitField0_ & 0x00000040) == 0x00000040); } /** * required uint64 blockSize = 7; */ public long getBlockSize() { return blockSize_; } // repeated .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 8; public static final int CRYPTOPROTOCOLVERSION_FIELD_NUMBER = 8; private java.util.List cryptoProtocolVersion_; /** * repeated .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 8; */ public java.util.List getCryptoProtocolVersionList() { return cryptoProtocolVersion_; } /** * repeated .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 8; */ public int getCryptoProtocolVersionCount() { return cryptoProtocolVersion_.size(); } /** * repeated .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 8; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion(int index) { return cryptoProtocolVersion_.get(index); } // optional .hadoop.hdfs.FsPermissionProto unmasked = 9; public static final int UNMASKED_FIELD_NUMBER = 9; private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto unmasked_; /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 9; */ public boolean hasUnmasked() { return ((bitField0_ & 0x00000080) == 0x00000080); } /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 9; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getUnmasked() { return unmasked_; } /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 9; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getUnmaskedOrBuilder() { return unmasked_; } // optional string ecPolicyName = 10; public static final int ECPOLICYNAME_FIELD_NUMBER = 10; private java.lang.Object ecPolicyName_; /** * optional string ecPolicyName = 10; */ public boolean hasEcPolicyName() { return ((bitField0_ & 0x00000100) == 0x00000100); } /** * optional string ecPolicyName = 10; */ public java.lang.String getEcPolicyName() { java.lang.Object ref = ecPolicyName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { ecPolicyName_ = s; } return s; } } /** * optional string ecPolicyName = 10; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getEcPolicyNameBytes() { java.lang.Object ref = ecPolicyName_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ecPolicyName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { src_ = ""; masked_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); clientName_ = ""; createFlag_ = 0; createParent_ = false; replication_ = 0; blockSize_ = 0L; cryptoProtocolVersion_ = java.util.Collections.emptyList(); unmasked_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); ecPolicyName_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() 
{ byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } if (!hasMasked()) { memoizedIsInitialized = 0; return false; } if (!hasClientName()) { memoizedIsInitialized = 0; return false; } if (!hasCreateFlag()) { memoizedIsInitialized = 0; return false; } if (!hasCreateParent()) { memoizedIsInitialized = 0; return false; } if (!hasReplication()) { memoizedIsInitialized = 0; return false; } if (!hasBlockSize()) { memoizedIsInitialized = 0; return false; } if (!getMasked().isInitialized()) { memoizedIsInitialized = 0; return false; } if (hasUnmasked()) { if (!getUnmasked().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, masked_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, getClientNameBytes()); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeUInt32(4, createFlag_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeBool(5, createParent_); } if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeUInt32(6, replication_); } if (((bitField0_ & 0x00000040) == 0x00000040)) { output.writeUInt64(7, blockSize_); } for (int i = 0; i < cryptoProtocolVersion_.size(); i++) { output.writeEnum(8, cryptoProtocolVersion_.get(i).getNumber()); } if (((bitField0_ & 0x00000080) == 0x00000080)) { output.writeMessage(9, unmasked_); } if (((bitField0_ & 0x00000100) == 0x00000100)) { output.writeBytes(10, getEcPolicyNameBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(2, masked_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(3, getClientNameBytes()); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt32Size(4, createFlag_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(5, createParent_); } if (((bitField0_ & 0x00000020) == 0x00000020)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt32Size(6, replication_); } if (((bitField0_ & 0x00000040) == 0x00000040)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(7, blockSize_); } { int dataSize = 0; for (int i = 0; i < cryptoProtocolVersion_.size(); i++) { dataSize += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeEnumSizeNoTag(cryptoProtocolVersion_.get(i).getNumber()); } size += dataSize; size += 1 * cryptoProtocolVersion_.size(); } if (((bitField0_ & 0x00000080) == 0x00000080)) { size += 
io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(9, unmasked_); } if (((bitField0_ & 0x00000100) == 0x00000100)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(10, getEcPolicyNameBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && (hasMasked() == other.hasMasked()); if (hasMasked()) { result = result && getMasked() .equals(other.getMasked()); } result = result && (hasClientName() == other.hasClientName()); if (hasClientName()) { result = result && getClientName() .equals(other.getClientName()); } result = result && (hasCreateFlag() == other.hasCreateFlag()); if (hasCreateFlag()) { result = result && (getCreateFlag() == other.getCreateFlag()); } result = result && (hasCreateParent() == other.hasCreateParent()); if (hasCreateParent()) { result = result && (getCreateParent() == other.getCreateParent()); } result = result && (hasReplication() == other.hasReplication()); if (hasReplication()) { result = result && (getReplication() == other.getReplication()); } result = result && (hasBlockSize() == other.hasBlockSize()); if (hasBlockSize()) { result = result && (getBlockSize() == other.getBlockSize()); } result = result && getCryptoProtocolVersionList() .equals(other.getCryptoProtocolVersionList()); result = result && (hasUnmasked() == other.hasUnmasked()); if (hasUnmasked()) { result = result && getUnmasked() .equals(other.getUnmasked()); } result = result && (hasEcPolicyName() == other.hasEcPolicyName()); if (hasEcPolicyName()) { result = result && getEcPolicyName() .equals(other.getEcPolicyName()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } if (hasMasked()) { hash = (37 * hash) + MASKED_FIELD_NUMBER; hash = (53 * hash) + getMasked().hashCode(); } if (hasClientName()) { hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER; hash = (53 * hash) + getClientName().hashCode(); } if (hasCreateFlag()) { hash = (37 * hash) + CREATEFLAG_FIELD_NUMBER; hash = (53 * hash) + getCreateFlag(); } if (hasCreateParent()) { hash = (37 * hash) + CREATEPARENT_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getCreateParent()); } if (hasReplication()) { hash = (37 * hash) + REPLICATION_FIELD_NUMBER; hash = (53 * hash) + getReplication(); } if (hasBlockSize()) { hash = (37 * hash) + BLOCKSIZE_FIELD_NUMBER; hash = (53 * hash) + hashLong(getBlockSize()); } if 
(getCryptoProtocolVersionCount() > 0) { hash = (37 * hash) + CRYPTOPROTOCOLVERSION_FIELD_NUMBER; hash = (53 * hash) + hashEnumList(getCryptoProtocolVersionList()); } if (hasUnmasked()) { hash = (37 * hash) + UNMASKED_FIELD_NUMBER; hash = (53 * hash) + getUnmasked().hashCode(); } if (hasEcPolicyName()) { hash = (37 * hash) + ECPOLICYNAME_FIELD_NUMBER; hash = (53 * hash) + getEcPolicyName().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public 
Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.CreateRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getMaskedFieldBuilder(); getUnmaskedFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); if (maskedBuilder_ == null) { masked_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); } else { maskedBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); clientName_ = ""; bitField0_ = (bitField0_ & ~0x00000004); createFlag_ = 0; bitField0_ = (bitField0_ & ~0x00000008); createParent_ = false; bitField0_ = (bitField0_ & ~0x00000010); replication_ = 0; bitField0_ = (bitField0_ & ~0x00000020); blockSize_ = 0L; bitField0_ = (bitField0_ & ~0x00000040); cryptoProtocolVersion_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000080); if (unmaskedBuilder_ == null) { unmasked_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); } else { unmaskedBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000100); ecPolicyName_ = ""; bitField0_ = (bitField0_ & ~0x00000200); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto getDefaultInstanceForType() { return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } if (maskedBuilder_ == null) { result.masked_ = masked_; } else { result.masked_ = maskedBuilder_.build(); } if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.clientName_ = clientName_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.createFlag_ = createFlag_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } result.createParent_ = createParent_; if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000020; } result.replication_ = replication_; if (((from_bitField0_ & 0x00000040) == 0x00000040)) { to_bitField0_ |= 0x00000040; } result.blockSize_ = blockSize_; if (((bitField0_ & 0x00000080) == 0x00000080)) { cryptoProtocolVersion_ = java.util.Collections.unmodifiableList(cryptoProtocolVersion_); bitField0_ = (bitField0_ & ~0x00000080); } result.cryptoProtocolVersion_ = cryptoProtocolVersion_; if (((from_bitField0_ & 0x00000100) == 0x00000100)) { to_bitField0_ |= 0x00000080; } if (unmaskedBuilder_ == null) { result.unmasked_ = unmasked_; } else { result.unmasked_ = unmaskedBuilder_.build(); } if (((from_bitField0_ & 0x00000200) == 0x00000200)) { to_bitField0_ |= 0x00000100; } result.ecPolicyName_ = ecPolicyName_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } if (other.hasMasked()) { mergeMasked(other.getMasked()); } if (other.hasClientName()) { bitField0_ |= 0x00000004; clientName_ = other.clientName_; onChanged(); } if (other.hasCreateFlag()) { setCreateFlag(other.getCreateFlag()); } if (other.hasCreateParent()) { setCreateParent(other.getCreateParent()); } if (other.hasReplication()) { setReplication(other.getReplication()); } if (other.hasBlockSize()) { setBlockSize(other.getBlockSize()); } if (!other.cryptoProtocolVersion_.isEmpty()) { if (cryptoProtocolVersion_.isEmpty()) { 
cryptoProtocolVersion_ = other.cryptoProtocolVersion_; bitField0_ = (bitField0_ & ~0x00000080); } else { ensureCryptoProtocolVersionIsMutable(); cryptoProtocolVersion_.addAll(other.cryptoProtocolVersion_); } onChanged(); } if (other.hasUnmasked()) { mergeUnmasked(other.getUnmasked()); } if (other.hasEcPolicyName()) { bitField0_ |= 0x00000200; ecPolicyName_ = other.ecPolicyName_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } if (!hasMasked()) { return false; } if (!hasClientName()) { return false; } if (!hasCreateFlag()) { return false; } if (!hasCreateParent()) { return false; } if (!hasReplication()) { return false; } if (!hasBlockSize()) { return false; } if (!getMasked().isInitialized()) { return false; } if (hasUnmasked()) { if (!getUnmasked().isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // required .hadoop.hdfs.FsPermissionProto masked = 2; private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto masked_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> maskedBuilder_; /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ public boolean hasMasked() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getMasked() { if (maskedBuilder_ == null) { return masked_; } else { return maskedBuilder_.getMessage(); } } /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ public Builder setMasked(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) { if (maskedBuilder_ == null) { if (value == null) { throw new NullPointerException(); } masked_ = value; onChanged(); } else { maskedBuilder_.setMessage(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ public Builder setMasked( org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder builderForValue) { if (maskedBuilder_ == null) { masked_ = builderForValue.build(); onChanged(); } else { maskedBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ public Builder mergeMasked(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) { if (maskedBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002) && masked_ != org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance()) { masked_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.newBuilder(masked_).mergeFrom(value).buildPartial(); } else { masked_ = value; } onChanged(); } else { maskedBuilder_.mergeFrom(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ public Builder clearMasked() { if (maskedBuilder_ == null) { masked_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); onChanged(); } else { maskedBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder getMaskedBuilder() { bitField0_ |= 0x00000002; onChanged(); return getMaskedFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getMaskedOrBuilder() { if (maskedBuilder_ != null) { return maskedBuilder_.getMessageOrBuilder(); } else { return masked_; } } /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> getMaskedFieldBuilder() { if (maskedBuilder_ == null) { maskedBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder>( masked_, getParentForChildren(), isClean()); 
masked_ = null; } return maskedBuilder_; } // required string clientName = 3; private java.lang.Object clientName_ = ""; /** * required string clientName = 3; */ public boolean hasClientName() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string clientName = 3; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); clientName_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string clientName = 3; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string clientName = 3; */ public Builder setClientName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; clientName_ = value; onChanged(); return this; } /** * required string clientName = 3; */ public Builder clearClientName() { bitField0_ = (bitField0_ & ~0x00000004); clientName_ = getDefaultInstance().getClientName(); onChanged(); return this; } /** * required string clientName = 3; */ public Builder setClientNameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; clientName_ = value; onChanged(); return this; } // required uint32 createFlag = 4; private int createFlag_ ; /** * required uint32 createFlag = 4; * *
       * bits set using CreateFlag
*/ public boolean hasCreateFlag() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required uint32 createFlag = 4; * *
       * bits set using CreateFlag
*/ public int getCreateFlag() { return createFlag_; } /** * required uint32 createFlag = 4; * *
       * bits set using CreateFlag
*/ public Builder setCreateFlag(int value) { bitField0_ |= 0x00000008; createFlag_ = value; onChanged(); return this; } /** * required uint32 createFlag = 4; * *
       * bits set using CreateFlag
*/ public Builder clearCreateFlag() { bitField0_ = (bitField0_ & ~0x00000008); createFlag_ = 0; onChanged(); return this; } // required bool createParent = 5; private boolean createParent_ ; /** * required bool createParent = 5; */ public boolean hasCreateParent() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required bool createParent = 5; */ public boolean getCreateParent() { return createParent_; } /** * required bool createParent = 5; */ public Builder setCreateParent(boolean value) { bitField0_ |= 0x00000010; createParent_ = value; onChanged(); return this; } /** * required bool createParent = 5; */ public Builder clearCreateParent() { bitField0_ = (bitField0_ & ~0x00000010); createParent_ = false; onChanged(); return this; } // required uint32 replication = 6; private int replication_ ; /** * required uint32 replication = 6; * *
       * Short: Only 16 bits used
*/ public boolean hasReplication() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * required uint32 replication = 6; * *
       * Short: Only 16 bits used
*/ public int getReplication() { return replication_; } /** * required uint32 replication = 6; * *
       * Short: Only 16 bits used
*/ public Builder setReplication(int value) { bitField0_ |= 0x00000020; replication_ = value; onChanged(); return this; } /** * required uint32 replication = 6; * *
       * Short: Only 16 bits used
*/ public Builder clearReplication() { bitField0_ = (bitField0_ & ~0x00000020); replication_ = 0; onChanged(); return this; } // required uint64 blockSize = 7; private long blockSize_ ; /** * required uint64 blockSize = 7; */ public boolean hasBlockSize() { return ((bitField0_ & 0x00000040) == 0x00000040); } /** * required uint64 blockSize = 7; */ public long getBlockSize() { return blockSize_; } /** * required uint64 blockSize = 7; */ public Builder setBlockSize(long value) { bitField0_ |= 0x00000040; blockSize_ = value; onChanged(); return this; } /** * required uint64 blockSize = 7; */ public Builder clearBlockSize() { bitField0_ = (bitField0_ & ~0x00000040); blockSize_ = 0L; onChanged(); return this; } // repeated .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 8; private java.util.List cryptoProtocolVersion_ = java.util.Collections.emptyList(); private void ensureCryptoProtocolVersionIsMutable() { if (!((bitField0_ & 0x00000080) == 0x00000080)) { cryptoProtocolVersion_ = new java.util.ArrayList(cryptoProtocolVersion_); bitField0_ |= 0x00000080; } } /** * repeated .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 8; */ public java.util.List getCryptoProtocolVersionList() { return java.util.Collections.unmodifiableList(cryptoProtocolVersion_); } /** * repeated .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 8; */ public int getCryptoProtocolVersionCount() { return cryptoProtocolVersion_.size(); } /** * repeated .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 8; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion(int index) { return cryptoProtocolVersion_.get(index); } /** * repeated .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 8; */ public Builder setCryptoProtocolVersion( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value) { if (value == null) { throw new NullPointerException(); } ensureCryptoProtocolVersionIsMutable(); cryptoProtocolVersion_.set(index, value); onChanged(); return this; } /** * repeated .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 8; */ public Builder addCryptoProtocolVersion(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value) { if (value == null) { throw new NullPointerException(); } ensureCryptoProtocolVersionIsMutable(); cryptoProtocolVersion_.add(value); onChanged(); return this; } /** * repeated .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 8; */ public Builder addAllCryptoProtocolVersion( java.lang.Iterable values) { ensureCryptoProtocolVersionIsMutable(); super.addAll(values, cryptoProtocolVersion_); onChanged(); return this; } /** * repeated .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 8; */ public Builder clearCryptoProtocolVersion() { cryptoProtocolVersion_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000080); onChanged(); return this; } // optional .hadoop.hdfs.FsPermissionProto unmasked = 9; private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto unmasked_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> unmaskedBuilder_; 
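      // ---------------------------------------------------------------------
      // Editor's note: illustrative sketch, not part of the generated file.
      // It shows how a caller might populate this Builder before issuing a
      // create() RPC. The path, client name and permission bits below are
      // hypothetical example values, not values mandated by the protocol.
      // ---------------------------------------------------------------------
      private static CreateRequestProto exampleCreateRequest() {
        org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto perm =
            org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.newBuilder()
                .setPerm(0644)                              // octal 0644 -> rw-r--r--
                .build();
        return CreateRequestProto.newBuilder()
            .setSrc("/user/example/file.txt")               // required string src = 1
            .setMasked(perm)                                // required FsPermissionProto masked = 2
            .setClientName("DFSClient_example")             // required string clientName = 3
            .setCreateFlag(CreateFlagProto.CREATE_VALUE
                | CreateFlagProto.OVERWRITE_VALUE)          // field 4: bit-OR of CreateFlagProto values
            .setCreateParent(true)                          // required bool createParent = 5
            .setReplication(3)                              // required uint32 replication = 6
            .setBlockSize(134217728L)                       // required uint64 blockSize = 7 (128 MiB)
            .build();                                       // build() rejects unset required fields
      }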
/** * optional .hadoop.hdfs.FsPermissionProto unmasked = 9; */ public boolean hasUnmasked() { return ((bitField0_ & 0x00000100) == 0x00000100); } /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 9; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getUnmasked() { if (unmaskedBuilder_ == null) { return unmasked_; } else { return unmaskedBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 9; */ public Builder setUnmasked(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) { if (unmaskedBuilder_ == null) { if (value == null) { throw new NullPointerException(); } unmasked_ = value; onChanged(); } else { unmaskedBuilder_.setMessage(value); } bitField0_ |= 0x00000100; return this; } /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 9; */ public Builder setUnmasked( org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder builderForValue) { if (unmaskedBuilder_ == null) { unmasked_ = builderForValue.build(); onChanged(); } else { unmaskedBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000100; return this; } /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 9; */ public Builder mergeUnmasked(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) { if (unmaskedBuilder_ == null) { if (((bitField0_ & 0x00000100) == 0x00000100) && unmasked_ != org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance()) { unmasked_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.newBuilder(unmasked_).mergeFrom(value).buildPartial(); } else { unmasked_ = value; } onChanged(); } else { unmaskedBuilder_.mergeFrom(value); } bitField0_ |= 0x00000100; return this; } /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 9; */ public Builder clearUnmasked() { if (unmaskedBuilder_ == null) { unmasked_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); onChanged(); } else { unmaskedBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000100); return this; } /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 9; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder getUnmaskedBuilder() { bitField0_ |= 0x00000100; onChanged(); return getUnmaskedFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 9; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getUnmaskedOrBuilder() { if (unmaskedBuilder_ != null) { return unmaskedBuilder_.getMessageOrBuilder(); } else { return unmasked_; } } /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 9; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> getUnmaskedFieldBuilder() { if (unmaskedBuilder_ == null) { unmaskedBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder>( unmasked_, getParentForChildren(), isClean()); unmasked_ = null; } return unmaskedBuilder_; } // optional string ecPolicyName = 10; private java.lang.Object ecPolicyName_ = ""; /** * optional 
string ecPolicyName = 10; */ public boolean hasEcPolicyName() { return ((bitField0_ & 0x00000200) == 0x00000200); } /** * optional string ecPolicyName = 10; */ public java.lang.String getEcPolicyName() { java.lang.Object ref = ecPolicyName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); ecPolicyName_ = s; return s; } else { return (java.lang.String) ref; } } /** * optional string ecPolicyName = 10; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getEcPolicyNameBytes() { java.lang.Object ref = ecPolicyName_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ecPolicyName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * optional string ecPolicyName = 10; */ public Builder setEcPolicyName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000200; ecPolicyName_ = value; onChanged(); return this; } /** * optional string ecPolicyName = 10; */ public Builder clearEcPolicyName() { bitField0_ = (bitField0_ & ~0x00000200); ecPolicyName_ = getDefaultInstance().getEcPolicyName(); onChanged(); return this; } /** * optional string ecPolicyName = 10; */ public Builder setEcPolicyNameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000200; ecPolicyName_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CreateRequestProto) } static { defaultInstance = new CreateRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.CreateRequestProto) } public interface CreateResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ boolean hasFs(); /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getFs(); /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getFsOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.CreateResponseProto} */ public static final class CreateResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements CreateResponseProtoOrBuilder { // Use CreateResponseProto.newBuilder() to construct. 
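    // -----------------------------------------------------------------------
    // Editor's note: illustrative sketch, not part of the generated file. It
    // shows the parse/serialize round trip that every generated message
    // supports; wireBytes stands in for a payload that would normally arrive
    // from the NameNode over the RPC layer.
    // -----------------------------------------------------------------------
    private static void exampleRoundTrip(byte[] wireBytes)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      CreateResponseProto resp = CreateResponseProto.parseFrom(wireBytes); // delegates to PARSER
      if (resp.hasFs()) {                                   // fs is optional: test before reading
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto fs = resp.getFs();
        long length = fs.getLength();                       // file length reported by the NameNode
      }
      byte[] reserialized = resp.toByteArray();             // writeTo() + getSerializedSize() underneath
    }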
private CreateResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private CreateResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final CreateResponseProto defaultInstance; public static CreateResponseProto getDefaultInstance() { return defaultInstance; } public CreateResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CreateResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = fs_.toBuilder(); } fs_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(fs_); fs_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public CreateResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { 
return new CreateResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; public static final int FS_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto fs_; /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public boolean hasFs() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getFs() { return fs_; } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getFsOrBuilder() { return fs_; } private void initFields() { fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (hasFs()) { if (!getFs().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, fs_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, fs_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto) obj; boolean result = true; result = result && (hasFs() == other.hasFs()); if (hasFs()) { result = result && getFs() .equals(other.getFs()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasFs()) { hash = (37 * hash) + FS_FIELD_NUMBER; hash = (53 * hash) + getFs().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.CreateResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProtoOrBuilder { public static final 
io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getFsFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (fsBuilder_ == null) { fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); } else { fsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (fsBuilder_ == null) { result.fs_ = fs_; } else { result.fs_ = fsBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto other) { if (other == 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.getDefaultInstance()) return this; if (other.hasFs()) { mergeFs(other.getFs()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (hasFs()) { if (!getFs().isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> fsBuilder_; /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public boolean hasFs() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getFs() { if (fsBuilder_ == null) { return fs_; } else { return fsBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public Builder setFs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (fsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } fs_ = value; onChanged(); } else { fsBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public Builder setFs( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (fsBuilder_ == null) { fs_ = builderForValue.build(); onChanged(); } else { fsBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public Builder mergeFs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (fsBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && fs_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()) { fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder(fs_).mergeFrom(value).buildPartial(); } else { fs_ = value; } onChanged(); } else { fsBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public Builder clearFs() { if (fsBuilder_ == null) { fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); onChanged(); } else { fsBuilder_.clear(); } bitField0_ = 
(bitField0_ & ~0x00000001); return this; } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getFsBuilder() { bitField0_ |= 0x00000001; onChanged(); return getFsFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getFsOrBuilder() { if (fsBuilder_ != null) { return fsBuilder_.getMessageOrBuilder(); } else { return fs_; } } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> getFsFieldBuilder() { if (fsBuilder_ == null) { fsBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>( fs_, getParentForChildren(), isClean()); fs_ = null; } return fsBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CreateResponseProto) } static { defaultInstance = new CreateResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.CreateResponseProto) } public interface AppendRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; */ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); // required string clientName = 2; /** * required string clientName = 2; */ boolean hasClientName(); /** * required string clientName = 2; */ java.lang.String getClientName(); /** * required string clientName = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes(); // optional uint32 flag = 3; /** * optional uint32 flag = 3; * *
     * <pre>
     * bits set using CreateFlag
     * </pre>
*/ boolean hasFlag(); /** * optional uint32 flag = 3; * *
     * <pre>
     * bits set using CreateFlag
     * </pre>
*/ int getFlag(); } /** * Protobuf type {@code hadoop.hdfs.AppendRequestProto} */ public static final class AppendRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements AppendRequestProtoOrBuilder { // Use AppendRequestProto.newBuilder() to construct. private AppendRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private AppendRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final AppendRequestProto defaultInstance; public static AppendRequestProto getDefaultInstance() { return defaultInstance; } public AppendRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private AppendRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; clientName_ = input.readBytes(); break; } case 24: { bitField0_ |= 0x00000004; flag_ = input.readUInt32(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AppendRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AppendRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public AppendRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
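/* The parser defined here delegates to the message's private parsing
 * constructor; parsePartialFrom tolerates missing required fields, whereas
 * the public parseFrom(...) overloads further down also verify that the
 * required src and clientName fields are present. */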
io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new AppendRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string src = 1; public static final int SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required string clientName = 2; public static final int CLIENTNAME_FIELD_NUMBER = 2; private java.lang.Object clientName_; /** * required string clientName = 2; */ public boolean hasClientName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string clientName = 2; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { clientName_ = s; } return s; } } /** * required string clientName = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // optional uint32 flag = 3; public static final int FLAG_FIELD_NUMBER = 3; private int flag_; /** * optional uint32 flag = 3; * *
     * <pre>
     * bits set using CreateFlag
     * </pre>
*/ public boolean hasFlag() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional uint32 flag = 3; * *
     * <pre>
     * bits set using CreateFlag
     * </pre>
*/ public int getFlag() { return flag_; } private void initFields() { src_ = ""; clientName_ = ""; flag_ = 0; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } if (!hasClientName()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getClientNameBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt32(3, flag_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(2, getClientNameBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt32Size(3, flag_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && (hasClientName() == other.hasClientName()); if (hasClientName()) { result = result && getClientName() .equals(other.getClientName()); } result = result && (hasFlag() == other.hasFlag()); if (hasFlag()) { result = result && (getFlag() == other.getFlag()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } if (hasClientName()) { hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER; hash = (53 * hash) + getClientName().hashCode(); } if (hasFlag()) { hash = (37 * hash) + FLAG_FIELD_NUMBER; hash = (53 * hash) + getFlag(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws 
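/* A round-trip sketch for the static parseFrom overloads declared here,
 * assuming a previously built request req; the byte-oriented overloads
 * throw InvalidProtocolBufferException when a required field is absent:
 *
 *   byte[] wire = req.toByteArray();
 *   AppendRequestProto same = AppendRequestProto.parseFrom(wire);
 *   same.equals(req); // true: equals() compares field by field
 */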
io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.AppendRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AppendRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AppendRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); clientName_ = ""; bitField0_ = (bitField0_ & ~0x00000002); flag_ = 0; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AppendRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.clientName_ = clientName_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.flag_ = flag_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto)other); } else { 
super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } if (other.hasClientName()) { bitField0_ |= 0x00000002; clientName_ = other.clientName_; onChanged(); } if (other.hasFlag()) { setFlag(other.getFlag()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } if (!hasClientName()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // required string clientName = 2; private java.lang.Object clientName_ = ""; /** * required string clientName = 2; */ public boolean hasClientName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string clientName = 2; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); clientName_ = s; return s; } else { return 
(java.lang.String) ref; } } /** * required string clientName = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string clientName = 2; */ public Builder setClientName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; clientName_ = value; onChanged(); return this; } /** * required string clientName = 2; */ public Builder clearClientName() { bitField0_ = (bitField0_ & ~0x00000002); clientName_ = getDefaultInstance().getClientName(); onChanged(); return this; } /** * required string clientName = 2; */ public Builder setClientNameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; clientName_ = value; onChanged(); return this; } // optional uint32 flag = 3; private int flag_ ; /** * optional uint32 flag = 3; * *
       * <pre>
       * bits set using CreateFlag
       * </pre>
*/ public boolean hasFlag() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional uint32 flag = 3; * *
       * <pre>
       * bits set using CreateFlag
       * </pre>
*/ public int getFlag() { return flag_; } /** * optional uint32 flag = 3; * *
       * <pre>
       * bits set using CreateFlag
       * </pre>
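       * <p>
       * A minimal builder sketch with placeholder values; CreateFlagProto is
       * the enum declared earlier in this file:
       * <pre>
       * AppendRequestProto req = AppendRequestProto.newBuilder()
       *     .setSrc("/user/example/data.log")
       *     .setClientName("DFSClient_example")
       *     .setFlag(CreateFlagProto.APPEND_VALUE) // bit mask; values may be OR-ed together
       *     .build(); // fails if src or clientName is unset
       * </pre>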
*/ public Builder setFlag(int value) { bitField0_ |= 0x00000004; flag_ = value; onChanged(); return this; } /** * optional uint32 flag = 3; * *
       * <pre>
       * bits set using CreateFlag
       * </pre>
*/ public Builder clearFlag() { bitField0_ = (bitField0_ & ~0x00000004); flag_ = 0; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AppendRequestProto) } static { defaultInstance = new AppendRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.AppendRequestProto) } public interface AppendResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // optional .hadoop.hdfs.LocatedBlockProto block = 1; /** * optional .hadoop.hdfs.LocatedBlockProto block = 1; */ boolean hasBlock(); /** * optional .hadoop.hdfs.LocatedBlockProto block = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock(); /** * optional .hadoop.hdfs.LocatedBlockProto block = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder(); // optional .hadoop.hdfs.HdfsFileStatusProto stat = 2; /** * optional .hadoop.hdfs.HdfsFileStatusProto stat = 2; */ boolean hasStat(); /** * optional .hadoop.hdfs.HdfsFileStatusProto stat = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getStat(); /** * optional .hadoop.hdfs.HdfsFileStatusProto stat = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getStatOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.AppendResponseProto} */ public static final class AppendResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements AppendResponseProtoOrBuilder { // Use AppendResponseProto.newBuilder() to construct. private AppendResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private AppendResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final AppendResponseProto defaultInstance; public static AppendResponseProto getDefaultInstance() { return defaultInstance; } public AppendResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private AppendResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = block_.toBuilder(); } block_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(block_); block_ = subBuilder.buildPartial(); } 
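/* If field 1 occurs more than once on the wire, the branch above merges the
 * newly read block into the previously read one (toBuilder() plus
 * mergeFrom()), the standard proto2 treatment of duplicate occurrences of an
 * embedded message field. */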
bitField0_ |= 0x00000001; break; } case 18: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = stat_.toBuilder(); } stat_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(stat_); stat_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000002; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AppendResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AppendResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public AppendResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new AppendResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // optional .hadoop.hdfs.LocatedBlockProto block = 1; public static final int BLOCK_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto block_; /** * optional .hadoop.hdfs.LocatedBlockProto block = 1; */ public boolean hasBlock() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .hadoop.hdfs.LocatedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock() { return block_; } /** * optional .hadoop.hdfs.LocatedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder() { return block_; } // optional .hadoop.hdfs.HdfsFileStatusProto stat = 2; public static final int STAT_FIELD_NUMBER = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto stat_; /** * optional .hadoop.hdfs.HdfsFileStatusProto stat = 2; */ public boolean hasStat() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional .hadoop.hdfs.HdfsFileStatusProto stat = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getStat() { return stat_; } /** * optional .hadoop.hdfs.HdfsFileStatusProto stat = 2; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getStatOrBuilder() { return stat_; } private void initFields() { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); stat_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (hasBlock()) { if (!getBlock().isInitialized()) { memoizedIsInitialized = 0; return false; } } if (hasStat()) { if (!getStat().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, block_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, stat_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, block_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(2, stat_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto) obj; boolean result = true; result = result && (hasBlock() == other.hasBlock()); if (hasBlock()) { result = result && getBlock() .equals(other.getBlock()); } result = result && (hasStat() == other.hasStat()); if (hasStat()) { result = result && getStat() .equals(other.getStat()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasBlock()) { hash = (37 * hash) + BLOCK_FIELD_NUMBER; hash = (53 * hash) + getBlock().hashCode(); } if (hasStat()) { hash = (37 * hash) + STAT_FIELD_NUMBER; hash = (53 * hash) + getStat().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto 
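/* Both fields of AppendResponseProto are optional, so callers are expected
 * to consult the hazzers before reading them; a hedged sketch, with resp
 * standing in for a parsed response and useBlock/useStat assumed helpers:
 *
 *   if (resp.hasBlock()) { useBlock(resp.getBlock()); }
 *   if (resp.hasStat())  { useStat(resp.getStat()); }
 *   // without the hasX() check, getX() returns the default instance
 */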
parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.AppendResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AppendResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AppendResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getBlockFieldBuilder(); getStatFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (blockBuilder_ == null) { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); } else { blockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); if (statBuilder_ == null) { stat_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); } else { statBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AppendResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (blockBuilder_ == null) { result.block_ = block_; } else { result.block_ = blockBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } if (statBuilder_ == null) { result.stat_ = stat_; } else { result.stat_ = statBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto) { return 
mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.getDefaultInstance()) return this; if (other.hasBlock()) { mergeBlock(other.getBlock()); } if (other.hasStat()) { mergeStat(other.getStat()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (hasBlock()) { if (!getBlock().isInitialized()) { return false; } } if (hasStat()) { if (!getStat().isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // optional .hadoop.hdfs.LocatedBlockProto block = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blockBuilder_; /** * optional .hadoop.hdfs.LocatedBlockProto block = 1; */ public boolean hasBlock() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .hadoop.hdfs.LocatedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock() { if (blockBuilder_ == null) { return block_; } else { return blockBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.LocatedBlockProto block = 1; */ public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (blockBuilder_ == null) { if (value == null) { throw new NullPointerException(); } block_ = value; onChanged(); } else { blockBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.LocatedBlockProto block = 1; */ public Builder setBlock( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { if (blockBuilder_ == null) { block_ = builderForValue.build(); onChanged(); } else { blockBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.LocatedBlockProto block = 1; */ public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (blockBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) { block_ = 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); } else { block_ = value; } onChanged(); } else { blockBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.LocatedBlockProto block = 1; */ public Builder clearBlock() { if (blockBuilder_ == null) { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); onChanged(); } else { blockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * optional .hadoop.hdfs.LocatedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlockBuilder() { bitField0_ |= 0x00000001; onChanged(); return getBlockFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.LocatedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder() { if (blockBuilder_ != null) { return blockBuilder_.getMessageOrBuilder(); } else { return block_; } } /** * optional .hadoop.hdfs.LocatedBlockProto block = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> getBlockFieldBuilder() { if (blockBuilder_ == null) { blockBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>( block_, getParentForChildren(), isClean()); block_ = null; } return blockBuilder_; } // optional .hadoop.hdfs.HdfsFileStatusProto stat = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto stat_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> statBuilder_; /** * optional .hadoop.hdfs.HdfsFileStatusProto stat = 2; */ public boolean hasStat() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional .hadoop.hdfs.HdfsFileStatusProto stat = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getStat() { if (statBuilder_ == null) { return stat_; } else { return statBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.HdfsFileStatusProto stat = 2; */ public Builder setStat(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (statBuilder_ == null) { if (value == null) { throw new NullPointerException(); } stat_ = value; onChanged(); } else { statBuilder_.setMessage(value); } bitField0_ |= 0x00000002; return this; } /** * optional .hadoop.hdfs.HdfsFileStatusProto stat = 2; */ public Builder setStat( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (statBuilder_ == null) { stat_ = builderForValue.build(); onChanged(); } else { statBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000002; return this; } /** * optional 
.hadoop.hdfs.HdfsFileStatusProto stat = 2; */ public Builder mergeStat(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (statBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002) && stat_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()) { stat_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder(stat_).mergeFrom(value).buildPartial(); } else { stat_ = value; } onChanged(); } else { statBuilder_.mergeFrom(value); } bitField0_ |= 0x00000002; return this; } /** * optional .hadoop.hdfs.HdfsFileStatusProto stat = 2; */ public Builder clearStat() { if (statBuilder_ == null) { stat_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); onChanged(); } else { statBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } /** * optional .hadoop.hdfs.HdfsFileStatusProto stat = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getStatBuilder() { bitField0_ |= 0x00000002; onChanged(); return getStatFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.HdfsFileStatusProto stat = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getStatOrBuilder() { if (statBuilder_ != null) { return statBuilder_.getMessageOrBuilder(); } else { return stat_; } } /** * optional .hadoop.hdfs.HdfsFileStatusProto stat = 2; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> getStatFieldBuilder() { if (statBuilder_ == null) { statBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>( stat_, getParentForChildren(), isClean()); stat_ = null; } return statBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AppendResponseProto) } static { defaultInstance = new AppendResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.AppendResponseProto) } public interface SetReplicationRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; */ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); // required uint32 replication = 2; /** * required uint32 replication = 2; * *
     * <pre>
     * Short: Only 16 bits used
     * </pre>
*/ boolean hasReplication(); /** * required uint32 replication = 2; * *
     * <pre>
     * Short: Only 16 bits used
     * </pre>
*/ int getReplication(); } /** * Protobuf type {@code hadoop.hdfs.SetReplicationRequestProto} */ public static final class SetReplicationRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements SetReplicationRequestProtoOrBuilder { // Use SetReplicationRequestProto.newBuilder() to construct. private SetReplicationRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SetReplicationRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SetReplicationRequestProto defaultInstance; public static SetReplicationRequestProto getDefaultInstance() { return defaultInstance; } public SetReplicationRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SetReplicationRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } case 16: { bitField0_ |= 0x00000002; replication_ = input.readUInt32(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetReplicationRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetReplicationRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public SetReplicationRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new SetReplicationRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string src = 1; public static final int SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required uint32 replication = 2; public static final int REPLICATION_FIELD_NUMBER = 2; private int replication_; /** * required uint32 replication = 2; * *
     * <pre>
     * Short: Only 16 bits used
     * </pre>
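     * <p>
     * A builder sketch with a placeholder path; the wire type is uint32, but
     * HDFS replication factors fit comfortably in those 16 bits:
     * <pre>
     * SetReplicationRequestProto.newBuilder()
     *     .setSrc("/user/example/data.log")
     *     .setReplication(3) // setReplication assumed from the uint32 field
     *     .build();
     * </pre>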
*/ public boolean hasReplication() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint32 replication = 2; * *
     * <pre>
     * Short: Only 16 bits used
     * </pre>
*/ public int getReplication() { return replication_; } private void initFields() { src_ = ""; replication_ = 0; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } if (!hasReplication()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt32(2, replication_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt32Size(2, replication_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && (hasReplication() == other.hasReplication()); if (hasReplication()) { result = result && (getReplication() == other.getReplication()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } if (hasReplication()) { hash = (37 * hash) + REPLICATION_FIELD_NUMBER; hash = (53 * hash) + getReplication(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SetReplicationRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetReplicationRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetReplicationRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); replication_ = 0; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetReplicationRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.replication_ = replication_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto other) { if (other == 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } if (other.hasReplication()) { setReplication(other.getReplication()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } if (!hasReplication()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // required uint32 replication = 2; private int replication_ ; /** * required uint32 replication = 2; * *
       * Short: Only 16 bits used
       * 
*/ public boolean hasReplication() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint32 replication = 2; * *
       * Short: Only 16 bits used
       * 
*/ public int getReplication() { return replication_; } /** * required uint32 replication = 2; * *
       * Short: Only 16 bits used
       * 
*/ public Builder setReplication(int value) { bitField0_ |= 0x00000002; replication_ = value; onChanged(); return this; } /** * required uint32 replication = 2; * *
       * Short: Only 16 bits used
       * 
*/ public Builder clearReplication() { bitField0_ = (bitField0_ & ~0x00000002); replication_ = 0; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SetReplicationRequestProto) } static { defaultInstance = new SetReplicationRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SetReplicationRequestProto) } public interface SetReplicationResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required bool result = 1; /** * required bool result = 1; */ boolean hasResult(); /** * required bool result = 1; */ boolean getResult(); } /** * Protobuf type {@code hadoop.hdfs.SetReplicationResponseProto} */ public static final class SetReplicationResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements SetReplicationResponseProtoOrBuilder { // Use SetReplicationResponseProto.newBuilder() to construct. private SetReplicationResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SetReplicationResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SetReplicationResponseProto defaultInstance; public static SetReplicationResponseProto getDefaultInstance() { return defaultInstance; } public SetReplicationResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SetReplicationResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; result_ = input.readBool(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetReplicationResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetReplicationResponseProto_fieldAccessorTable 
.ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public SetReplicationResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new SetReplicationResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required bool result = 1; public static final int RESULT_FIELD_NUMBER = 1; private boolean result_; /** * required bool result = 1; */ public boolean hasResult() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool result = 1; */ public boolean getResult() { return result_; } private void initFields() { result_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasResult()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBool(1, result_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(1, result_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto) obj; boolean result = true; result = result && (hasResult() == other.hasResult()); if (hasResult()) { result = result && (getResult() == other.getResult()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasResult()) { hash = (37 * hash) + RESULT_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getResult()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( 
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SetReplicationResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetReplicationResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetReplicationResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); result_ = false; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetReplicationResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.result_ = result_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.getDefaultInstance()) return this; if (other.hasResult()) { setResult(other.getResult()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasResult()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required bool result = 1; private boolean result_ ; /** * required bool result = 1; */ public boolean hasResult() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool result = 1; */ public boolean getResult() { return result_; } /** * required bool result = 1; */ public Builder setResult(boolean value) { bitField0_ |= 0x00000001; result_ = value; onChanged(); return this; } /** * required bool result = 1; */ public Builder clearResult() { bitField0_ = (bitField0_ & ~0x00000001); result_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SetReplicationResponseProto) } static { defaultInstance = new SetReplicationResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SetReplicationResponseProto) } public interface SetStoragePolicyRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; */ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); // required string policyName = 2; /** * required string policyName = 2; */ boolean hasPolicyName(); /** * required string policyName = 2; */ java.lang.String getPolicyName(); /** * required string policyName = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPolicyNameBytes(); } /** * Protobuf type {@code hadoop.hdfs.SetStoragePolicyRequestProto} */ public static final class SetStoragePolicyRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements SetStoragePolicyRequestProtoOrBuilder { // Use SetStoragePolicyRequestProto.newBuilder() to construct. 
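    // -----------------------------------------------------------------
    // Illustrative sketch, not part of the generated source: how a caller
    // would typically exercise the SetReplicationRequestProto /
    // SetReplicationResponseProto pair defined above. The path
    // "/user/example/file" and the replication factor 3 are hypothetical
    // values chosen only for this example.
    private static boolean exampleSetReplicationRoundTrip()
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      // Both fields are required; omitting either would make build()
      // throw an UninitializedMessageException.
      SetReplicationRequestProto request = SetReplicationRequestProto.newBuilder()
          .setSrc("/user/example/file") // required string src = 1
          .setReplication(3)            // required uint32 replication = 2 (only 16 bits used)
          .build();
      // Round-trip through the wire format, as the RPC layer would.
      SetReplicationRequestProto parsed =
          SetReplicationRequestProto.parseFrom(request.toByteArray());
      // A NameNode would answer with the one-field response message.
      SetReplicationResponseProto response = SetReplicationResponseProto.newBuilder()
          .setResult(parsed.getReplication() == 3)
          .build();
      return response.getResult();
    }
    // -----------------------------------------------------------------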
private SetStoragePolicyRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SetStoragePolicyRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SetStoragePolicyRequestProto defaultInstance; public static SetStoragePolicyRequestProto getDefaultInstance() { return defaultInstance; } public SetStoragePolicyRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SetStoragePolicyRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; policyName_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetStoragePolicyRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetStoragePolicyRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public SetStoragePolicyRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new SetStoragePolicyRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return 
PARSER; } private int bitField0_; // required string src = 1; public static final int SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required string policyName = 2; public static final int POLICYNAME_FIELD_NUMBER = 2; private java.lang.Object policyName_; /** * required string policyName = 2; */ public boolean hasPolicyName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string policyName = 2; */ public java.lang.String getPolicyName() { java.lang.Object ref = policyName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { policyName_ = s; } return s; } } /** * required string policyName = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPolicyNameBytes() { java.lang.Object ref = policyName_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); policyName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { src_ = ""; policyName_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } if (!hasPolicyName()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getPolicyNameBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(2, getPolicyNameBytes()); } size += 
getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && (hasPolicyName() == other.hasPolicyName()); if (hasPolicyName()) { result = result && getPolicyName() .equals(other.getPolicyName()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } if (hasPolicyName()) { hash = (37 * hash) + POLICYNAME_FIELD_NUMBER; hash = (53 * hash) + getPolicyName().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SetStoragePolicyRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetStoragePolicyRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetStoragePolicyRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); policyName_ = ""; bitField0_ = (bitField0_ & ~0x00000002); return 
this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetStoragePolicyRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.policyName_ = policyName_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } if (other.hasPolicyName()) { bitField0_ |= 0x00000002; policyName_ = other.policyName_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } if (!hasPolicyName()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) 
== 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // required string policyName = 2; private java.lang.Object policyName_ = ""; /** * required string policyName = 2; */ public boolean hasPolicyName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string policyName = 2; */ public java.lang.String getPolicyName() { java.lang.Object ref = policyName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); policyName_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string policyName = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPolicyNameBytes() { java.lang.Object ref = policyName_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); policyName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string policyName = 2; */ public Builder setPolicyName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; policyName_ = value; onChanged(); return this; } /** * required string policyName = 2; */ public Builder clearPolicyName() { bitField0_ = (bitField0_ & ~0x00000002); policyName_ = getDefaultInstance().getPolicyName(); onChanged(); return this; } /** * required string policyName = 2; */ public Builder setPolicyNameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; policyName_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SetStoragePolicyRequestProto) } static { defaultInstance = new SetStoragePolicyRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SetStoragePolicyRequestProto) } public interface SetStoragePolicyResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf 
type {@code hadoop.hdfs.SetStoragePolicyResponseProto} * * <pre>
   * void response
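   * (the message intentionally carries no fields; a successful call is
   * indicated simply by the RPC completing without an exception)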
   * </pre>
*/ public static final class SetStoragePolicyResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements SetStoragePolicyResponseProtoOrBuilder { // Use SetStoragePolicyResponseProto.newBuilder() to construct. private SetStoragePolicyResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SetStoragePolicyResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SetStoragePolicyResponseProto defaultInstance; public static SetStoragePolicyResponseProto getDefaultInstance() { return defaultInstance; } public SetStoragePolicyResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SetStoragePolicyResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetStoragePolicyResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetStoragePolicyResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public SetStoragePolicyResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new SetStoragePolicyResponseProto(input, extensionRegistry); } }; @java.lang.Override public 
io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto parseFrom( java.io.InputStream input, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SetStoragePolicyResponseProto} * *
     * void response
     * 
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetStoragePolicyResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetStoragePolicyResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetStoragePolicyResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto other) { if (other == 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SetStoragePolicyResponseProto) } static { defaultInstance = new SetStoragePolicyResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SetStoragePolicyResponseProto) } public interface UnsetStoragePolicyRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; */ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); } /** * Protobuf type {@code hadoop.hdfs.UnsetStoragePolicyRequestProto} */ public static final class UnsetStoragePolicyRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements UnsetStoragePolicyRequestProtoOrBuilder { // Use UnsetStoragePolicyRequestProto.newBuilder() to construct. 
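    // Editorial usage sketch (not generated code): building and round-tripping
    // this message through the builder API defined below. The path "/data/warm"
    // is a hypothetical example value.
    //
    //   UnsetStoragePolicyRequestProto req =
    //       UnsetStoragePolicyRequestProto.newBuilder()
    //           .setSrc("/data/warm")   // required field
    //           .build();               // throws if a required field is unset
    //   byte[] wire = req.toByteArray();
    //   UnsetStoragePolicyRequestProto parsed =
    //       UnsetStoragePolicyRequestProto.parseFrom(wire);
    //   assert "/data/warm".equals(parsed.getSrc());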
private UnsetStoragePolicyRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private UnsetStoragePolicyRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final UnsetStoragePolicyRequestProto defaultInstance; public static UnsetStoragePolicyRequestProto getDefaultInstance() { return defaultInstance; } public UnsetStoragePolicyRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private UnsetStoragePolicyRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UnsetStoragePolicyRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UnsetStoragePolicyRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public UnsetStoragePolicyRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new UnsetStoragePolicyRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string src 
= 1; public static final int SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { src_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws 
io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.UnsetStoragePolicyRequestProto} */ 
public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UnsetStoragePolicyRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UnsetStoragePolicyRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UnsetStoragePolicyRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto)other); } else { super.mergeFrom(other); 
return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.UnsetStoragePolicyRequestProto) } static { defaultInstance = new UnsetStoragePolicyRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.UnsetStoragePolicyRequestProto) } public interface UnsetStoragePolicyResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.UnsetStoragePolicyResponseProto} */ public static final class UnsetStoragePolicyResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements UnsetStoragePolicyResponseProtoOrBuilder { // Use 
UnsetStoragePolicyResponseProto.newBuilder() to construct. private UnsetStoragePolicyResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private UnsetStoragePolicyResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final UnsetStoragePolicyResponseProto defaultInstance; public static UnsetStoragePolicyResponseProto getDefaultInstance() { return defaultInstance; } public UnsetStoragePolicyResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private UnsetStoragePolicyResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UnsetStoragePolicyResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UnsetStoragePolicyResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public UnsetStoragePolicyResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new UnsetStoragePolicyResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; 
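    // Editorial note (hedged): this response message declares no fields, so a
    // freshly built instance serializes to zero bytes; only unknown fields
    // retained from parsing would contribute to the wire size. A minimal sketch:
    //
    //   UnsetStoragePolicyResponseProto resp =
    //       UnsetStoragePolicyResponseProto.newBuilder().build();
    //   assert resp.getSerializedSize() == 0;
    //   assert resp.equals(
    //       UnsetStoragePolicyResponseProto.parseFrom(resp.toByteArray()));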
public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, 
extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.UnsetStoragePolicyResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UnsetStoragePolicyResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UnsetStoragePolicyResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { 
return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UnsetStoragePolicyResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.UnsetStoragePolicyResponseProto) } static { defaultInstance = new UnsetStoragePolicyResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.UnsetStoragePolicyResponseProto) } public interface GetStoragePolicyRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string path = 1; /** * required string path = 1; */ boolean hasPath(); /** * required string path = 1; */ java.lang.String getPath(); /** * required string path = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes(); } /** * Protobuf type {@code hadoop.hdfs.GetStoragePolicyRequestProto} */ 
public static final class GetStoragePolicyRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetStoragePolicyRequestProtoOrBuilder { // Use GetStoragePolicyRequestProto.newBuilder() to construct. private GetStoragePolicyRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetStoragePolicyRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetStoragePolicyRequestProto defaultInstance; public static GetStoragePolicyRequestProto getDefaultInstance() { return defaultInstance; } public GetStoragePolicyRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetStoragePolicyRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; path_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetStoragePolicyRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetStoragePolicyRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetStoragePolicyRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new 
GetStoragePolicyRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string path = 1; public static final int PATH_FIELD_NUMBER = 1; private java.lang.Object path_; /** * required string path = 1; */ public boolean hasPath() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string path = 1; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { path_ = s; } return s; } } /** * required string path = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { path_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasPath()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getPathBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getPathBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto) obj; boolean result = true; result = result && (hasPath() == other.hasPath()); if (hasPath()) { result = result && getPath() .equals(other.getPath()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasPath()) { hash = (37 * hash) + PATH_FIELD_NUMBER; hash = (53 * hash) + getPath().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode 
= hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( 
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetStoragePolicyRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetStoragePolicyRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetStoragePolicyRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); path_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetStoragePolicyRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.path_ = path_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto.getDefaultInstance()) return this; if (other.hasPath()) { bitField0_ |= 0x00000001; path_ = other.path_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasPath()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string path = 1; private java.lang.Object path_ = ""; /** * required string path = 1; */ public boolean hasPath() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string path = 1; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); path_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string path = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string path = 1; */ public Builder setPath( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; path_ = value; onChanged(); return this; } /** * required string path = 1; */ public Builder clearPath() { bitField0_ = (bitField0_ & ~0x00000001); path_ = getDefaultInstance().getPath(); onChanged(); return this; } /** * required string path = 1; */ public Builder setPathBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; path_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetStoragePolicyRequestProto) } static { defaultInstance = new GetStoragePolicyRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetStoragePolicyRequestProto) } public interface GetStoragePolicyResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required 
.hadoop.hdfs.BlockStoragePolicyProto storagePolicy = 1; /** * required .hadoop.hdfs.BlockStoragePolicyProto storagePolicy = 1; */ boolean hasStoragePolicy(); /** * required .hadoop.hdfs.BlockStoragePolicyProto storagePolicy = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto getStoragePolicy(); /** * required .hadoop.hdfs.BlockStoragePolicyProto storagePolicy = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProtoOrBuilder getStoragePolicyOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.GetStoragePolicyResponseProto} */ public static final class GetStoragePolicyResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetStoragePolicyResponseProtoOrBuilder { // Use GetStoragePolicyResponseProto.newBuilder() to construct. private GetStoragePolicyResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetStoragePolicyResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetStoragePolicyResponseProto defaultInstance; public static GetStoragePolicyResponseProto getDefaultInstance() { return defaultInstance; } public GetStoragePolicyResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetStoragePolicyResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = storagePolicy_.toBuilder(); } storagePolicy_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(storagePolicy_); storagePolicy_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetStoragePolicyResponseProto_descriptor; } 
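    // Editorial usage sketch (not generated code): this response wraps a
    // required BlockStoragePolicyProto. parseFrom() rejects input whose
    // required fields are missing, so hasStoragePolicy() mainly matters for
    // messages assembled by hand. `bytes` below is a hypothetical wire payload.
    //
    //   GetStoragePolicyResponseProto resp =
    //       GetStoragePolicyResponseProto.parseFrom(bytes);
    //   if (resp.hasStoragePolicy()) {
    //     org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto
    //         policy = resp.getStoragePolicy();
    //     // inspect the policy via its generated accessors
    //   }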
protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetStoragePolicyResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetStoragePolicyResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetStoragePolicyResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.BlockStoragePolicyProto storagePolicy = 1; public static final int STORAGEPOLICY_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto storagePolicy_; /** * required .hadoop.hdfs.BlockStoragePolicyProto storagePolicy = 1; */ public boolean hasStoragePolicy() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.BlockStoragePolicyProto storagePolicy = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto getStoragePolicy() { return storagePolicy_; } /** * required .hadoop.hdfs.BlockStoragePolicyProto storagePolicy = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProtoOrBuilder getStoragePolicyOrBuilder() { return storagePolicy_; } private void initFields() { storagePolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasStoragePolicy()) { memoizedIsInitialized = 0; return false; } if (!getStoragePolicy().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, storagePolicy_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, storagePolicy_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto) obj; boolean result = true; result = result && (hasStoragePolicy() == other.hasStoragePolicy()); if (hasStoragePolicy()) { result = result && getStoragePolicy() .equals(other.getStoragePolicy()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasStoragePolicy()) { hash = (37 * hash) + STORAGEPOLICY_FIELD_NUMBER; hash = (53 * hash) + getStoragePolicy().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetStoragePolicyResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetStoragePolicyResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetStoragePolicyResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getStoragePolicyFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (storagePolicyBuilder_ == null) { storagePolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.getDefaultInstance(); } else { storagePolicyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetStoragePolicyResponseProto_descriptor; } public 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (storagePolicyBuilder_ == null) { result.storagePolicy_ = storagePolicy_; } else { result.storagePolicy_ = storagePolicyBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto.getDefaultInstance()) return this; if (other.hasStoragePolicy()) { mergeStoragePolicy(other.getStoragePolicy()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasStoragePolicy()) { return false; } if (!getStoragePolicy().isInitialized()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.BlockStoragePolicyProto storagePolicy = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto storagePolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProtoOrBuilder> storagePolicyBuilder_; /** * required .hadoop.hdfs.BlockStoragePolicyProto storagePolicy = 1; */ public boolean hasStoragePolicy() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.BlockStoragePolicyProto storagePolicy = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto getStoragePolicy() { if (storagePolicyBuilder_ == null) { return storagePolicy_; } else { return storagePolicyBuilder_.getMessage(); } } /** * required .hadoop.hdfs.BlockStoragePolicyProto storagePolicy = 1; */ public Builder setStoragePolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto value) { if (storagePolicyBuilder_ == null) { if (value == null) { throw new NullPointerException(); } storagePolicy_ = value; onChanged(); } else { storagePolicyBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.BlockStoragePolicyProto storagePolicy = 1; */ public Builder setStoragePolicy( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder builderForValue) { if (storagePolicyBuilder_ == null) { storagePolicy_ = builderForValue.build(); onChanged(); } else { storagePolicyBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.BlockStoragePolicyProto storagePolicy = 1; */ public Builder mergeStoragePolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto value) { if (storagePolicyBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && storagePolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.getDefaultInstance()) { storagePolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.newBuilder(storagePolicy_).mergeFrom(value).buildPartial(); } else { storagePolicy_ = value; } onChanged(); } else { storagePolicyBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.BlockStoragePolicyProto storagePolicy = 1; */ public Builder clearStoragePolicy() { if (storagePolicyBuilder_ == null) { storagePolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.getDefaultInstance(); onChanged(); } else { storagePolicyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.BlockStoragePolicyProto storagePolicy = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder getStoragePolicyBuilder() { bitField0_ |= 0x00000001; onChanged(); return getStoragePolicyFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.BlockStoragePolicyProto storagePolicy = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProtoOrBuilder getStoragePolicyOrBuilder() { if (storagePolicyBuilder_ != null) { return storagePolicyBuilder_.getMessageOrBuilder(); } else { return storagePolicy_; } } /** * required .hadoop.hdfs.BlockStoragePolicyProto storagePolicy = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProtoOrBuilder> getStoragePolicyFieldBuilder() { if (storagePolicyBuilder_ == null) { storagePolicyBuilder_ = new 
io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProtoOrBuilder>( storagePolicy_, getParentForChildren(), isClean()); storagePolicy_ = null; } return storagePolicyBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetStoragePolicyResponseProto) } static { defaultInstance = new GetStoragePolicyResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetStoragePolicyResponseProto) } public interface GetStoragePoliciesRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.GetStoragePoliciesRequestProto} * *
   * <pre>
   * void request
   * </pre>
*/ public static final class GetStoragePoliciesRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetStoragePoliciesRequestProtoOrBuilder { // Use GetStoragePoliciesRequestProto.newBuilder() to construct. private GetStoragePoliciesRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetStoragePoliciesRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetStoragePoliciesRequestProto defaultInstance; public static GetStoragePoliciesRequestProto getDefaultInstance() { return defaultInstance; } public GetStoragePoliciesRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetStoragePoliciesRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetStoragePoliciesRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetStoragePoliciesRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetStoragePoliciesRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetStoragePoliciesRequestProto(input, extensionRegistry); } }; 
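    // --- Editorial note, not generated code: GetStoragePoliciesRequestProto
    // declares no fields (the .proto comment above reads "void request"), so a
    // caller normally reuses the shared default instance rather than a builder.
    // A hedged sketch, with illustrative naming:
    //
    //   GetStoragePoliciesRequestProto req =
    //       GetStoragePoliciesRequestProto.getDefaultInstance();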
@java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto parseFrom( 
java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetStoragePoliciesRequestProto} * *
     * <pre>
     * void request
     * </pre>
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetStoragePoliciesRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetStoragePoliciesRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetStoragePoliciesRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto other) { if (other == 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetStoragePoliciesRequestProto) } static { defaultInstance = new GetStoragePoliciesRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetStoragePoliciesRequestProto) } public interface GetStoragePoliciesResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ java.util.List getPoliciesList(); /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto getPolicies(int index); /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ int getPoliciesCount(); /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ java.util.List getPoliciesOrBuilderList(); /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProtoOrBuilder getPoliciesOrBuilder( int index); } /** * Protobuf type {@code hadoop.hdfs.GetStoragePoliciesResponseProto} */ public static final class GetStoragePoliciesResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetStoragePoliciesResponseProtoOrBuilder { // Use GetStoragePoliciesResponseProto.newBuilder() to construct. 
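    // --- Editorial note, not generated code: a hedged usage sketch. ---
    // The response carries a repeated "policies" field, so after parsing a
    // client usually just iterates the list view (names here are illustrative):
    //
    //   GetStoragePoliciesResponseProto resp =
    //       GetStoragePoliciesResponseProto.parseFrom(responseBytes);
    //   for (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto
    //       policy : resp.getPoliciesList()) {
    //     // inspect each block storage policy
    //   }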
private GetStoragePoliciesResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetStoragePoliciesResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetStoragePoliciesResponseProto defaultInstance; public static GetStoragePoliciesResponseProto getDefaultInstance() { return defaultInstance; } public GetStoragePoliciesResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetStoragePoliciesResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { policies_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } policies_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.PARSER, extensionRegistry)); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { policies_ = java.util.Collections.unmodifiableList(policies_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetStoragePoliciesResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetStoragePoliciesResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetStoragePoliciesResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetStoragePoliciesResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } // repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; public static final int POLICIES_FIELD_NUMBER = 1; private java.util.List policies_; /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public java.util.List getPoliciesList() { return policies_; } /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public java.util.List getPoliciesOrBuilderList() { return policies_; } /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public int getPoliciesCount() { return policies_.size(); } /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto getPolicies(int index) { return policies_.get(index); } /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProtoOrBuilder getPoliciesOrBuilder( int index) { return policies_.get(index); } private void initFields() { policies_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; for (int i = 0; i < getPoliciesCount(); i++) { if (!getPolicies(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); for (int i = 0; i < policies_.size(); i++) { output.writeMessage(1, policies_.get(i)); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; for (int i = 0; i < policies_.size(); i++) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, policies_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto) obj; boolean result = true; result = result && getPoliciesList() .equals(other.getPoliciesList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (getPoliciesCount() > 0) { hash = (37 * hash) + POLICIES_FIELD_NUMBER; hash = (53 * hash) + getPoliciesList().hashCode(); } 
hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return 
newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetStoragePoliciesResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetStoragePoliciesResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetStoragePoliciesResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getPoliciesFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (policiesBuilder_ == null) { policies_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { policiesBuilder_.clear(); } return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetStoragePoliciesResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto(this); int from_bitField0_ = bitField0_; if (policiesBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { policies_ = 
java.util.Collections.unmodifiableList(policies_); bitField0_ = (bitField0_ & ~0x00000001); } result.policies_ = policies_; } else { result.policies_ = policiesBuilder_.build(); } onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto.getDefaultInstance()) return this; if (policiesBuilder_ == null) { if (!other.policies_.isEmpty()) { if (policies_.isEmpty()) { policies_ = other.policies_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensurePoliciesIsMutable(); policies_.addAll(other.policies_); } onChanged(); } } else { if (!other.policies_.isEmpty()) { if (policiesBuilder_.isEmpty()) { policiesBuilder_.dispose(); policiesBuilder_ = null; policies_ = other.policies_; bitField0_ = (bitField0_ & ~0x00000001); policiesBuilder_ = io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getPoliciesFieldBuilder() : null; } else { policiesBuilder_.addAllMessages(other.policies_); } } } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { for (int i = 0; i < getPoliciesCount(); i++) { if (!getPolicies(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; private java.util.List policies_ = java.util.Collections.emptyList(); private void ensurePoliciesIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { policies_ = new java.util.ArrayList(policies_); bitField0_ |= 0x00000001; } } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProtoOrBuilder> policiesBuilder_; /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public java.util.List getPoliciesList() { if (policiesBuilder_ == null) { return java.util.Collections.unmodifiableList(policies_); } else { return policiesBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public int getPoliciesCount() { if (policiesBuilder_ == 
null) { return policies_.size(); } else { return policiesBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto getPolicies(int index) { if (policiesBuilder_ == null) { return policies_.get(index); } else { return policiesBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public Builder setPolicies( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto value) { if (policiesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensurePoliciesIsMutable(); policies_.set(index, value); onChanged(); } else { policiesBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public Builder setPolicies( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder builderForValue) { if (policiesBuilder_ == null) { ensurePoliciesIsMutable(); policies_.set(index, builderForValue.build()); onChanged(); } else { policiesBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public Builder addPolicies(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto value) { if (policiesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensurePoliciesIsMutable(); policies_.add(value); onChanged(); } else { policiesBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public Builder addPolicies( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto value) { if (policiesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensurePoliciesIsMutable(); policies_.add(index, value); onChanged(); } else { policiesBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public Builder addPolicies( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder builderForValue) { if (policiesBuilder_ == null) { ensurePoliciesIsMutable(); policies_.add(builderForValue.build()); onChanged(); } else { policiesBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public Builder addPolicies( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder builderForValue) { if (policiesBuilder_ == null) { ensurePoliciesIsMutable(); policies_.add(index, builderForValue.build()); onChanged(); } else { policiesBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public Builder addAllPolicies( java.lang.Iterable values) { if (policiesBuilder_ == null) { ensurePoliciesIsMutable(); super.addAll(values, policies_); onChanged(); } else { policiesBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public Builder clearPolicies() { if (policiesBuilder_ == null) { policies_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { policiesBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public Builder removePolicies(int index) { if (policiesBuilder_ == null) { 
ensurePoliciesIsMutable(); policies_.remove(index); onChanged(); } else { policiesBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder getPoliciesBuilder( int index) { return getPoliciesFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProtoOrBuilder getPoliciesOrBuilder( int index) { if (policiesBuilder_ == null) { return policies_.get(index); } else { return policiesBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public java.util.List getPoliciesOrBuilderList() { if (policiesBuilder_ != null) { return policiesBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(policies_); } } /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder addPoliciesBuilder() { return getPoliciesFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder addPoliciesBuilder( int index) { return getPoliciesFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.BlockStoragePolicyProto policies = 1; */ public java.util.List getPoliciesBuilderList() { return getPoliciesFieldBuilder().getBuilderList(); } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProtoOrBuilder> getPoliciesFieldBuilder() { if (policiesBuilder_ == null) { policiesBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProtoOrBuilder>( policies_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); policies_ = null; } return policiesBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetStoragePoliciesResponseProto) } static { defaultInstance = new GetStoragePoliciesResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetStoragePoliciesResponseProto) } public interface SetPermissionRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; */ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); // required .hadoop.hdfs.FsPermissionProto permission = 2; /** * required .hadoop.hdfs.FsPermissionProto permission = 2; */ boolean hasPermission(); /** * required .hadoop.hdfs.FsPermissionProto permission = 2; */ 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission(); /** * required .hadoop.hdfs.FsPermissionProto permission = 2; */ org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.SetPermissionRequestProto} */ public static final class SetPermissionRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements SetPermissionRequestProtoOrBuilder { // Use SetPermissionRequestProto.newBuilder() to construct. private SetPermissionRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SetPermissionRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SetPermissionRequestProto defaultInstance; public static SetPermissionRequestProto getDefaultInstance() { return defaultInstance; } public SetPermissionRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SetPermissionRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } case 18: { org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = permission_.toBuilder(); } permission_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(permission_); permission_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000002; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetPermissionRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return 
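// Wire-format note on the "case 10" / "case 18" branches in the parsing
// constructor above (standard protobuf encoding, stated for orientation, not
// text from the source): a tag is (field_number << 3) | wire_type, and
// length-delimited fields use wire type 2, so
//   src        (field 1): (1 << 3) | 2 = 10
//   permission (field 2): (2 << 3) | 2 = 18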
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetPermissionRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public SetPermissionRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new SetPermissionRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string src = 1; public static final int SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required .hadoop.hdfs.FsPermissionProto permission = 2; public static final int PERMISSION_FIELD_NUMBER = 2; private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto permission_; /** * required .hadoop.hdfs.FsPermissionProto permission = 2; */ public boolean hasPermission() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.FsPermissionProto permission = 2; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission() { return permission_; } /** * required .hadoop.hdfs.FsPermissionProto permission = 2; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() { return permission_; } private void initFields() { src_ = ""; permission_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } if (!hasPermission()) { memoizedIsInitialized = 0; return false; } if (!getPermission().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { 
getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, permission_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(2, permission_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && (hasPermission() == other.hasPermission()); if (hasPermission()) { result = result && getPermission() .equals(other.getPermission()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } if (hasPermission()) { hash = (37 * hash) + PERMISSION_FIELD_NUMBER; hash = (53 * hash) + getPermission().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
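// A hedged round-trip sketch over the serialization surface above. The path
// and the 0644 permission bits are illustrative assumptions, as is
// FsPermissionProto's setPerm accessor; toByteArray() is the standard
// protobuf-lite helper:
//
//   SetPermissionRequestProto req = SetPermissionRequestProto.newBuilder()
//       .setSrc("/user/alice/data.txt")                        // hypothetical path
//       .setPermission(org.apache.hadoop.hdfs.protocol.proto.AclProtos
//           .FsPermissionProto.newBuilder().setPerm(0644).build())
//       .build();                      // both required fields are set
//
//   byte[] bytes = req.toByteArray();
//   SetPermissionRequestProto back = SetPermissionRequestProto.parseFrom(bytes);
//   assert back.equals(req) && back.hashCode() == req.hashCode();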
io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SetPermissionRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetPermissionRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetPermissionRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto.Builder.class); } // Construct using 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getPermissionFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); if (permissionBuilder_ == null) { permission_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); } else { permissionBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetPermissionRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } if (permissionBuilder_ == null) { result.permission_ = permission_; } else { result.permission_ = permissionBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } if (other.hasPermission()) { mergePermission(other.getPermission()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } if (!hasPermission()) { return false; } if 
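// build() vs buildPartial(), as implemented in the Builder above: build()
// throws the unchecked UninitializedMessageException produced by
// newUninitializedMessageException() when a required field is missing, while
// buildPartial() returns the incomplete message as-is. A sketch (the shaded
// exception path is an inference from the relocated package):
//
//   try {
//     SetPermissionRequestProto.newBuilder()
//         .setSrc("/tmp/f")    // permission left unset on purpose
//         .build();
//   } catch (io.prestosql.hadoop.$internal.com.google.protobuf
//                .UninitializedMessageException expected) {
//     // reached: isInitialized() fails without the required permission field
//   }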
(!getPermission().isInitialized()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // required .hadoop.hdfs.FsPermissionProto permission = 2; private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto permission_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> permissionBuilder_; /** * required .hadoop.hdfs.FsPermissionProto permission = 2; */ public boolean hasPermission() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.FsPermissionProto permission = 2; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission() { if (permissionBuilder_ == null) { return permission_; } else { return permissionBuilder_.getMessage(); } } /** * required .hadoop.hdfs.FsPermissionProto permission = 2; */ public Builder setPermission(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) { if (permissionBuilder_ 
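// Merge-vs-set note for the singular "permission" message field whose
// accessors are defined here: setPermission() replaces the current value,
// while mergePermission() (below) folds the incoming message into any value
// already present, with set fields from the argument winning. Illustrative
// values only; AclProtos abbreviates org.apache.hadoop.hdfs.protocol.proto.AclProtos:
//
//   SetPermissionRequestProto.Builder b =
//       SetPermissionRequestProto.newBuilder().setSrc("/tmp/f");
//   b.setPermission(AclProtos.FsPermissionProto.newBuilder().setPerm(0600).build());
//   b.mergePermission(AclProtos.FsPermissionProto.newBuilder().setPerm(0644).build());
//   // perm is a scalar, so the merged result carries the last value, 0644.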
== null) { if (value == null) { throw new NullPointerException(); } permission_ = value; onChanged(); } else { permissionBuilder_.setMessage(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 2; */ public Builder setPermission( org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder builderForValue) { if (permissionBuilder_ == null) { permission_ = builderForValue.build(); onChanged(); } else { permissionBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 2; */ public Builder mergePermission(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) { if (permissionBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002) && permission_ != org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance()) { permission_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.newBuilder(permission_).mergeFrom(value).buildPartial(); } else { permission_ = value; } onChanged(); } else { permissionBuilder_.mergeFrom(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 2; */ public Builder clearPermission() { if (permissionBuilder_ == null) { permission_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); onChanged(); } else { permissionBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } /** * required .hadoop.hdfs.FsPermissionProto permission = 2; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder getPermissionBuilder() { bitField0_ |= 0x00000002; onChanged(); return getPermissionFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.FsPermissionProto permission = 2; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() { if (permissionBuilder_ != null) { return permissionBuilder_.getMessageOrBuilder(); } else { return permission_; } } /** * required .hadoop.hdfs.FsPermissionProto permission = 2; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> getPermissionFieldBuilder() { if (permissionBuilder_ == null) { permissionBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder>( permission_, getParentForChildren(), isClean()); permission_ = null; } return permissionBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SetPermissionRequestProto) } static { defaultInstance = new SetPermissionRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SetPermissionRequestProto) } public interface SetPermissionResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.SetPermissionResponseProto} * *
   * void response
   * 
*/ public static final class SetPermissionResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements SetPermissionResponseProtoOrBuilder { // Use SetPermissionResponseProto.newBuilder() to construct. private SetPermissionResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SetPermissionResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SetPermissionResponseProto defaultInstance; public static SetPermissionResponseProto getDefaultInstance() { return defaultInstance; } public SetPermissionResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SetPermissionResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetPermissionResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetPermissionResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public SetPermissionResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new SetPermissionResponseProto(input, extensionRegistry); } }; @java.lang.Override public 
io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto parseFrom( java.io.InputStream input, 
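// SetPermissionResponseProto above declares no fields: the corresponding RPC
// is effectively void, so the message is only an acknowledgement envelope.
// What that implies on the wire, as a sketch:
//
//   SetPermissionResponseProto ack =
//       SetPermissionResponseProto.newBuilder().build();   // nothing to set
//   assert ack.getSerializedSize() == 0;                   // empty wire image
//   SetPermissionResponseProto parsed =
//       SetPermissionResponseProto.parseFrom(new byte[0]); // zero bytes parse fine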
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SetPermissionResponseProto} * *
     * void response
     * 
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetPermissionResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetPermissionResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetPermissionResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.getDefaultInstance()) return this; 
this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SetPermissionResponseProto) } static { defaultInstance = new SetPermissionResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SetPermissionResponseProto) } public interface SetOwnerRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; */ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); // optional string username = 2; /** * optional string username = 2; */ boolean hasUsername(); /** * optional string username = 2; */ java.lang.String getUsername(); /** * optional string username = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getUsernameBytes(); // optional string groupname = 3; /** * optional string groupname = 3; */ boolean hasGroupname(); /** * optional string groupname = 3; */ java.lang.String getGroupname(); /** * optional string groupname = 3; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getGroupnameBytes(); } /** * Protobuf type {@code hadoop.hdfs.SetOwnerRequestProto} */ public static final class SetOwnerRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements SetOwnerRequestProtoOrBuilder { // Use SetOwnerRequestProto.newBuilder() to construct. 
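// A minimal builder sketch for SetOwnerRequestProto, whose contract is laid
// out in the interface above: src is required, while username and groupname
// are independently optional, so a caller may change the owner, the group,
// or both. The path and names are hypothetical:
//
//   SetOwnerRequestProto req = SetOwnerRequestProto.newBuilder()
//       .setSrc("/user/alice/data.txt")
//       .setUsername("alice")     // omit to leave the owner unchanged
//       .setGroupname("staff")    // omit to leave the group unchanged
//       .build();                 // isInitialized() only checks src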
private SetOwnerRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SetOwnerRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SetOwnerRequestProto defaultInstance; public static SetOwnerRequestProto getDefaultInstance() { return defaultInstance; } public SetOwnerRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SetOwnerRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; username_ = input.readBytes(); break; } case 26: { bitField0_ |= 0x00000004; groupname_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetOwnerRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetOwnerRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public SetOwnerRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new SetOwnerRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int 
bitField0_; // required string src = 1; public static final int SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // optional string username = 2; public static final int USERNAME_FIELD_NUMBER = 2; private java.lang.Object username_; /** * optional string username = 2; */ public boolean hasUsername() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string username = 2; */ public java.lang.String getUsername() { java.lang.Object ref = username_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { username_ = s; } return s; } } /** * optional string username = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getUsernameBytes() { java.lang.Object ref = username_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); username_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // optional string groupname = 3; public static final int GROUPNAME_FIELD_NUMBER = 3; private java.lang.Object groupname_; /** * optional string groupname = 3; */ public boolean hasGroupname() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional string groupname = 3; */ public java.lang.String getGroupname() { java.lang.Object ref = groupname_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { groupname_ = s; } return s; } } /** * optional string groupname = 3; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getGroupnameBytes() { java.lang.Object ref = groupname_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); groupname_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { src_ = ""; username_ = ""; groupname_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte 
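// Note on the string accessors above: each of src_/username_/groupname_ is a
// plain Object holding either a String or a ByteString. The getters decode
// UTF-8 on first access and cache the decoded String back into the field only
// when the bytes are valid UTF-8, so later reads skip the conversion; the
// getXxxBytes() methods do the reverse and cache the ByteString. This
// describes the generated code above rather than adding behavior.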
isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getUsernameBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, getGroupnameBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(2, getUsernameBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(3, getGroupnameBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && (hasUsername() == other.hasUsername()); if (hasUsername()) { result = result && getUsername() .equals(other.getUsername()); } result = result && (hasGroupname() == other.hasGroupname()); if (hasGroupname()) { result = result && getGroupname() .equals(other.getGroupname()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } if (hasUsername()) { hash = (37 * hash) + USERNAME_FIELD_NUMBER; hash = (53 * hash) + getUsername().hashCode(); } if (hasGroupname()) { hash = (37 * hash) + GROUPNAME_FIELD_NUMBER; hash = (53 * hash) + getGroupname().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
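// Presence-checking sketch for the optional fields above: after parsing,
// hasUsername()/hasGroupname() distinguish "explicitly present" from the ""
// default that the getters would otherwise return. The byte[] source is
// assumed to hold some serialized request:
//
//   SetOwnerRequestProto req = SetOwnerRequestProto.parseFrom(bytes);
//   if (req.hasUsername())  { /* an owner change was requested */ }
//   if (req.hasGroupname()) { /* a group change was requested  */ }
//   // isInitialized() verifies only the required src field, as shown above.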
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SetOwnerRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProtoOrBuilder { public static 
final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetOwnerRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetOwnerRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); username_ = ""; bitField0_ = (bitField0_ & ~0x00000002); groupname_ = ""; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetOwnerRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.username_ = username_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.groupname_ = groupname_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } if (other.hasUsername()) { bitField0_ |= 0x00000002; username_ = other.username_; onChanged(); } if (other.hasGroupname()) { bitField0_ |= 0x00000004; groupname_ = other.groupname_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // optional string username = 2; private java.lang.Object username_ = ""; /** * optional string username = 2; */ public boolean hasUsername() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string username = 2; */ public java.lang.String getUsername() { java.lang.Object ref = username_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); username_ = s; return s; } else { return (java.lang.String) ref; } } /** * optional string username = 
2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getUsernameBytes() { java.lang.Object ref = username_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); username_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * optional string username = 2; */ public Builder setUsername( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; username_ = value; onChanged(); return this; } /** * optional string username = 2; */ public Builder clearUsername() { bitField0_ = (bitField0_ & ~0x00000002); username_ = getDefaultInstance().getUsername(); onChanged(); return this; } /** * optional string username = 2; */ public Builder setUsernameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; username_ = value; onChanged(); return this; } // optional string groupname = 3; private java.lang.Object groupname_ = ""; /** * optional string groupname = 3; */ public boolean hasGroupname() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional string groupname = 3; */ public java.lang.String getGroupname() { java.lang.Object ref = groupname_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); groupname_ = s; return s; } else { return (java.lang.String) ref; } } /** * optional string groupname = 3; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getGroupnameBytes() { java.lang.Object ref = groupname_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); groupname_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * optional string groupname = 3; */ public Builder setGroupname( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; groupname_ = value; onChanged(); return this; } /** * optional string groupname = 3; */ public Builder clearGroupname() { bitField0_ = (bitField0_ & ~0x00000004); groupname_ = getDefaultInstance().getGroupname(); onChanged(); return this; } /** * optional string groupname = 3; */ public Builder setGroupnameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; groupname_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SetOwnerRequestProto) } static { defaultInstance = new SetOwnerRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SetOwnerRequestProto) } public interface SetOwnerResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.SetOwnerResponseProto} * *
   * <pre>
   * void response
   * </pre>
*/ public static final class SetOwnerResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements SetOwnerResponseProtoOrBuilder { // Use SetOwnerResponseProto.newBuilder() to construct. private SetOwnerResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SetOwnerResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SetOwnerResponseProto defaultInstance; public static SetOwnerResponseProto getDefaultInstance() { return defaultInstance; } public SetOwnerResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SetOwnerResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetOwnerResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetOwnerResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public SetOwnerResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new SetOwnerResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { 
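    // SetOwnerResponseProto declares no fields ("void response" above), so a
    // parsed instance only carries unknown fields from a newer sender. A sketch
    // of reading one length-prefixed response from a stream, assuming the
    // standard parseDelimitedFrom() contract (returns null at end of stream;
    // `in` is an assumed java.io.InputStream):
    //
    //   SetOwnerResponseProto resp = SetOwnerResponseProto.parseDelimitedFrom(in);
    //   if (resp == null) { /* clean end of stream, no response read */ }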
} private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SetOwnerResponseProto} * *
     * <pre>
     * void response
     * </pre>
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetOwnerResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetOwnerResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetOwnerResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; 
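      // build() delegates to buildPartial() and then throws an unchecked
      // UninitializedMessageException when isInitialized() fails, which for this
      // field-less response can never happen. A sketch against
      // SetOwnerRequestProto, whose src field is required:
      //
      //   SetOwnerRequestProto.Builder b = SetOwnerRequestProto.newBuilder()
      //       .setUsername("alice");      // src never set
      //   b.isInitialized();              // false
      //   b.buildPartial();               // succeeds; yields a partial message
      //   b.build();                      // throws UninitializedMessageException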
} public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SetOwnerResponseProto) } static { defaultInstance = new SetOwnerResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SetOwnerResponseProto) } public interface AbandonBlockRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.ExtendedBlockProto b = 1; /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ boolean hasB(); /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB(); /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder(); // required string src = 2; /** * required string src = 2; */ boolean hasSrc(); /** * required string src = 2; */ java.lang.String getSrc(); /** * required string src = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); // required string holder = 3; /** * required string holder = 3; */ boolean hasHolder(); /** * required string holder = 3; */ java.lang.String getHolder(); /** * required string holder = 3; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getHolderBytes(); // optional uint64 fileId = 4 [default = 0]; /** * optional uint64 fileId = 4 [default = 0]; * *
     * <pre>
     * default to GRANDFATHER_INODE_ID
     * </pre>
*/ boolean hasFileId(); /** * optional uint64 fileId = 4 [default = 0]; * *
     * <pre>
     * default to GRANDFATHER_INODE_ID
     * </pre>
*/ long getFileId(); } /** * Protobuf type {@code hadoop.hdfs.AbandonBlockRequestProto} */ public static final class AbandonBlockRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements AbandonBlockRequestProtoOrBuilder { // Use AbandonBlockRequestProto.newBuilder() to construct. private AbandonBlockRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private AbandonBlockRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final AbandonBlockRequestProto defaultInstance; public static AbandonBlockRequestProto getDefaultInstance() { return defaultInstance; } public AbandonBlockRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private AbandonBlockRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = b_.toBuilder(); } b_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(b_); b_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 18: { bitField0_ |= 0x00000002; src_ = input.readBytes(); break; } case 26: { bitField0_ |= 0x00000004; holder_ = input.readBytes(); break; } case 32: { bitField0_ |= 0x00000008; fileId_ = input.readUInt64(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AbandonBlockRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AbandonBlockRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( 
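  // The case labels in the parsing constructor above are protobuf wire tags,
  // tag = (field_number << 3) | wire_type, so for AbandonBlockRequestProto:
  //
  //   case 10: (1 << 3) | 2 -> field 1 (b),      length-delimited message
  //   case 18: (2 << 3) | 2 -> field 2 (src),    length-delimited string
  //   case 26: (3 << 3) | 2 -> field 3 (holder), length-delimited string
  //   case 32: (4 << 3) | 0 -> field 4 (fileId), varint-encoded uint64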
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public AbandonBlockRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new AbandonBlockRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.ExtendedBlockProto b = 1; public static final int B_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto b_; /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public boolean hasB() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB() { return b_; } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder() { return b_; } // required string src = 2; public static final int SRC_FIELD_NUMBER = 2; private java.lang.Object src_; /** * required string src = 2; */ public boolean hasSrc() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string src = 2; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required string holder = 3; public static final int HOLDER_FIELD_NUMBER = 3; private java.lang.Object holder_; /** * required string holder = 3; */ public boolean hasHolder() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string holder = 3; */ public java.lang.String getHolder() { java.lang.Object ref = holder_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { holder_ = s; } return s; } } /** * required string holder = 3; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getHolderBytes() { java.lang.Object ref = holder_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = 
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); holder_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // optional uint64 fileId = 4 [default = 0]; public static final int FILEID_FIELD_NUMBER = 4; private long fileId_; /** * optional uint64 fileId = 4 [default = 0]; * *
     * <pre>
     * default to GRANDFATHER_INODE_ID
     * </pre>
*/ public boolean hasFileId() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional uint64 fileId = 4 [default = 0]; * *
     * <pre>
     * default to GRANDFATHER_INODE_ID
     * </pre>
*/ public long getFileId() { return fileId_; } private void initFields() { b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); src_ = ""; holder_ = ""; fileId_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasB()) { memoizedIsInitialized = 0; return false; } if (!hasSrc()) { memoizedIsInitialized = 0; return false; } if (!hasHolder()) { memoizedIsInitialized = 0; return false; } if (!getB().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, b_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getSrcBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, getHolderBytes()); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeUInt64(4, fileId_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, b_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(2, getSrcBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(3, getHolderBytes()); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(4, fileId_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto) obj; boolean result = true; result = result && (hasB() == other.hasB()); if (hasB()) { result = result && getB() .equals(other.getB()); } result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && (hasHolder() == other.hasHolder()); if (hasHolder()) { result = result && getHolder() .equals(other.getHolder()); } result = result && (hasFileId() == other.hasFileId()); if (hasFileId()) { result = result && (getFileId() == other.getFileId()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasB()) { 
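    // Each present field is folded into the hash as
    // hash = 53 * (37 * hash + FIELD_NUMBER) + value-hash; for the uint64
    // fileId, hashLong(n) presumably mixes the long down to an int as
    // (int) (n ^ (n >>> 32)), matching java.lang.Long.hashCode().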
hash = (37 * hash) + B_FIELD_NUMBER; hash = (53 * hash) + getB().hashCode(); } if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } if (hasHolder()) { hash = (37 * hash) + HOLDER_FIELD_NUMBER; hash = (53 * hash) + getHolder().hashCode(); } if (hasFileId()) { hash = (37 * hash) + FILEID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getFileId()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder 
newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.AbandonBlockRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AbandonBlockRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AbandonBlockRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getBFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (bBuilder_ == null) { b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); } else { bBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); src_ = ""; bitField0_ = (bitField0_ & ~0x00000002); holder_ = ""; bitField0_ = (bitField0_ & ~0x00000004); fileId_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AbandonBlockRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (bBuilder_ == null) { result.b_ = b_; } else { result.b_ = bBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.src_ = src_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.holder_ = holder_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.fileId_ = fileId_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto.getDefaultInstance()) return this; if (other.hasB()) { mergeB(other.getB()); } if (other.hasSrc()) { bitField0_ |= 0x00000002; src_ = other.src_; onChanged(); } if (other.hasHolder()) { bitField0_ |= 0x00000004; holder_ = other.holder_; onChanged(); } if (other.hasFileId()) { setFileId(other.getFileId()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasB()) { return false; } if (!hasSrc()) { return false; } if (!hasHolder()) { return false; } if (!getB().isInitialized()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.ExtendedBlockProto b = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> bBuilder_; /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public boolean hasB() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required 
.hadoop.hdfs.ExtendedBlockProto b = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB() { if (bBuilder_ == null) { return b_; } else { return bBuilder_.getMessage(); } } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public Builder setB(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { if (bBuilder_ == null) { if (value == null) { throw new NullPointerException(); } b_ = value; onChanged(); } else { bBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public Builder setB( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) { if (bBuilder_ == null) { b_ = builderForValue.build(); onChanged(); } else { bBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public Builder mergeB(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { if (bBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && b_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) { b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(b_).mergeFrom(value).buildPartial(); } else { b_ = value; } onChanged(); } else { bBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public Builder clearB() { if (bBuilder_ == null) { b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); onChanged(); } else { bBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBBuilder() { bitField0_ |= 0x00000001; onChanged(); return getBFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder() { if (bBuilder_ != null) { return bBuilder_.getMessageOrBuilder(); } else { return b_; } } /** * required .hadoop.hdfs.ExtendedBlockProto b = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> getBFieldBuilder() { if (bBuilder_ == null) { bBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>( b_, getParentForChildren(), isClean()); b_ = null; } return bBuilder_; } // required string src = 2; private java.lang.Object src_ = ""; /** * required string src = 2; */ public boolean hasSrc() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string src = 2; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 2; */ 
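      // A sketch of populating the required nested block field through setB(),
      // assuming ExtendedBlockProto carries the usual hdfs.proto fields
      // (poolId, blockId, generationStamp); all values below are illustrative:
      //
      //   AbandonBlockRequestProto req = AbandonBlockRequestProto.newBuilder()
      //       .setB(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos
      //           .ExtendedBlockProto.newBuilder()
      //           .setPoolId("BP-1")
      //           .setBlockId(1073741825L)
      //           .setGenerationStamp(1001L)
      //           .build())
      //       .setSrc("/user/alice/data.txt")
      //       .setHolder("DFSClient_NONMAPREDUCE_1")
      //       .build();
      //   req.hasFileId();   // false: optional fileId was left unset
      //   req.getFileId();   // 0L, the GRANDFATHER_INODE_ID default noted above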
public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string src = 2; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; src_ = value; onChanged(); return this; } /** * required string src = 2; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000002); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 2; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; src_ = value; onChanged(); return this; } // required string holder = 3; private java.lang.Object holder_ = ""; /** * required string holder = 3; */ public boolean hasHolder() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string holder = 3; */ public java.lang.String getHolder() { java.lang.Object ref = holder_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); holder_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string holder = 3; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getHolderBytes() { java.lang.Object ref = holder_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); holder_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string holder = 3; */ public Builder setHolder( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; holder_ = value; onChanged(); return this; } /** * required string holder = 3; */ public Builder clearHolder() { bitField0_ = (bitField0_ & ~0x00000004); holder_ = getDefaultInstance().getHolder(); onChanged(); return this; } /** * required string holder = 3; */ public Builder setHolderBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; holder_ = value; onChanged(); return this; } // optional uint64 fileId = 4 [default = 0]; private long fileId_ ; /** * optional uint64 fileId = 4 [default = 0]; * *
       * <pre>
       * default to GRANDFATHER_INODE_ID
       * </pre>
*/ public boolean hasFileId() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional uint64 fileId = 4 [default = 0]; * *
       * <pre>
       * default to GRANDFATHER_INODE_ID
       * </pre>
*/ public long getFileId() { return fileId_; } /** * optional uint64 fileId = 4 [default = 0]; * *
       * <pre>
       * default to GRANDFATHER_INODE_ID
       * </pre>
*/ public Builder setFileId(long value) { bitField0_ |= 0x00000008; fileId_ = value; onChanged(); return this; } /** * optional uint64 fileId = 4 [default = 0]; * *
       * <pre>
       * default to GRANDFATHER_INODE_ID
       * </pre>
*/ public Builder clearFileId() { bitField0_ = (bitField0_ & ~0x00000008); fileId_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AbandonBlockRequestProto) } static { defaultInstance = new AbandonBlockRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.AbandonBlockRequestProto) } public interface AbandonBlockResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.AbandonBlockResponseProto} * *
   * <pre>
   * void response
   * </pre>
*/ public static final class AbandonBlockResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements AbandonBlockResponseProtoOrBuilder { // Use AbandonBlockResponseProto.newBuilder() to construct. private AbandonBlockResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private AbandonBlockResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final AbandonBlockResponseProto defaultInstance; public static AbandonBlockResponseProto getDefaultInstance() { return defaultInstance; } public AbandonBlockResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private AbandonBlockResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AbandonBlockResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AbandonBlockResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public AbandonBlockResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new AbandonBlockResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser 
getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.AbandonBlockResponseProto} * *
     * <pre>
     * void response
     * </pre>
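     *
     * Since this message has no fields, constructing a response is a one-liner.
     * A minimal sketch (hypothetical variable name, not part of this file):
     *   AbandonBlockResponseProto resp = AbandonBlockResponseProto.newBuilder().build();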
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AbandonBlockResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AbandonBlockResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AbandonBlockResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto.getDefaultInstance()) return this; 
this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AbandonBlockResponseProto) } static { defaultInstance = new AbandonBlockResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.AbandonBlockResponseProto) } public interface AddBlockRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; */ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); // required string clientName = 2; /** * required string clientName = 2; */ boolean hasClientName(); /** * required string clientName = 2; */ java.lang.String getClientName(); /** * required string clientName = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes(); // optional .hadoop.hdfs.ExtendedBlockProto previous = 3; /** * optional .hadoop.hdfs.ExtendedBlockProto previous = 3; */ boolean hasPrevious(); /** * optional .hadoop.hdfs.ExtendedBlockProto previous = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getPrevious(); /** * optional .hadoop.hdfs.ExtendedBlockProto previous = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getPreviousOrBuilder(); // repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ java.util.List getExcludeNodesList(); /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getExcludeNodes(int index); /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ int getExcludeNodesCount(); /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ java.util.List getExcludeNodesOrBuilderList(); /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getExcludeNodesOrBuilder( int index); // optional uint64 fileId = 5 [default = 0]; /** * optional uint64 fileId = 5 [default = 0]; * *
     * <pre>
     * default as a bogus id
     * </pre>
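     *
     * A guarded read sketch (req is an assumed AddBlockRequestProto instance):
     *   long fileId = req.hasFileId() ? req.getFileId() : 0L; // 0 is the bogus default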
*/ boolean hasFileId(); /** * optional uint64 fileId = 5 [default = 0]; * *
     * <pre>
     * default as a bogus id
     * </pre>
*/ long getFileId(); // repeated string favoredNodes = 6; /** * repeated string favoredNodes = 6; * *
     * <pre>
     * the set of datanodes to use for the block
     * </pre>
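     *
     * Favored nodes are supplied on the request builder; a sketch with a
     * hypothetical host:port value:
     *   builder.addFavoredNodes("dn1.example.com:9866");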
*/ java.util.List getFavoredNodesList(); /** * repeated string favoredNodes = 6; * *
     * <pre>
     * the set of datanodes to use for the block
     * </pre>
*/ int getFavoredNodesCount(); /** * repeated string favoredNodes = 6; * *
     * <pre>
     * the set of datanodes to use for the block
     * </pre>
*/ java.lang.String getFavoredNodes(int index); /** * repeated string favoredNodes = 6; * *
     * <pre>
     * the set of datanodes to use for the block
     * </pre>
*/ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getFavoredNodesBytes(int index); // repeated .hadoop.hdfs.AddBlockFlagProto flags = 7; /** * repeated .hadoop.hdfs.AddBlockFlagProto flags = 7; * *
     * <pre>
     * default to empty.
     * </pre>
*/ java.util.List getFlagsList(); /** * repeated .hadoop.hdfs.AddBlockFlagProto flags = 7; * *
     * <pre>
     * default to empty.
     * </pre>
*/ int getFlagsCount(); /** * repeated .hadoop.hdfs.AddBlockFlagProto flags = 7; * *
     * <pre>
     * default to empty.
     * </pre>
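     *
     * Iterating the repeated enum through this interface (req is an assumed
     * AddBlockRequestProto instance):
     *   for (int i = 0; i < req.getFlagsCount(); i++) {
     *     AddBlockFlagProto flag = req.getFlags(i);
     *   }
     * Note that the message parser accepts this field both unpacked (tag 56,
     * one varint per element) and packed (tag 58, a length-delimited run of
     * varints), as the two switch cases in AddBlockRequestProto's constructor show.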
*/ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockFlagProto getFlags(int index); } /** * Protobuf type {@code hadoop.hdfs.AddBlockRequestProto} */ public static final class AddBlockRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements AddBlockRequestProtoOrBuilder { // Use AddBlockRequestProto.newBuilder() to construct. private AddBlockRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private AddBlockRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final AddBlockRequestProto defaultInstance; public static AddBlockRequestProto getDefaultInstance() { return defaultInstance; } public AddBlockRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private AddBlockRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; clientName_ = input.readBytes(); break; } case 26: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = null; if (((bitField0_ & 0x00000004) == 0x00000004)) { subBuilder = previous_.toBuilder(); } previous_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(previous_); previous_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000004; break; } case 34: { if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { excludeNodes_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000008; } excludeNodes_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry)); break; } case 40: { bitField0_ |= 0x00000008; fileId_ = input.readUInt64(); break; } case 50: { if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) { favoredNodes_ = new io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00000020; } favoredNodes_.add(input.readBytes()); break; } case 56: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockFlagProto value = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockFlagProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(7, rawValue); } else { if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { flags_ = new java.util.ArrayList(); 
mutable_bitField0_ |= 0x00000040; } flags_.add(value); } break; } case 58: { int length = input.readRawVarint32(); int oldLimit = input.pushLimit(length); while(input.getBytesUntilLimit() > 0) { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockFlagProto value = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockFlagProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(7, rawValue); } else { if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { flags_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000040; } flags_.add(value); } } input.popLimit(oldLimit); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { excludeNodes_ = java.util.Collections.unmodifiableList(excludeNodes_); } if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) { favoredNodes_ = new io.prestosql.hadoop.$internal.com.google.protobuf.UnmodifiableLazyStringList(favoredNodes_); } if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { flags_ = java.util.Collections.unmodifiableList(flags_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddBlockRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddBlockRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public AddBlockRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new AddBlockRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string src = 1; public static final int SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required 
string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required string clientName = 2; public static final int CLIENTNAME_FIELD_NUMBER = 2; private java.lang.Object clientName_; /** * required string clientName = 2; */ public boolean hasClientName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string clientName = 2; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { clientName_ = s; } return s; } } /** * required string clientName = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // optional .hadoop.hdfs.ExtendedBlockProto previous = 3; public static final int PREVIOUS_FIELD_NUMBER = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto previous_; /** * optional .hadoop.hdfs.ExtendedBlockProto previous = 3; */ public boolean hasPrevious() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional .hadoop.hdfs.ExtendedBlockProto previous = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getPrevious() { return previous_; } /** * optional .hadoop.hdfs.ExtendedBlockProto previous = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getPreviousOrBuilder() { return previous_; } // repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; public static final int EXCLUDENODES_FIELD_NUMBER = 4; private java.util.List excludeNodes_; /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public java.util.List getExcludeNodesList() { return excludeNodes_; } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public java.util.List getExcludeNodesOrBuilderList() { return excludeNodes_; } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public int getExcludeNodesCount() { return excludeNodes_.size(); } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getExcludeNodes(int index) { return excludeNodes_.get(index); } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getExcludeNodesOrBuilder( int index) { return excludeNodes_.get(index); } // optional uint64 fileId = 5 [default = 0]; public static final int FILEID_FIELD_NUMBER = 5; private long fileId_; /** * optional uint64 fileId = 5 [default = 0]; * *
     * <pre>
     * default as a bogus id
     * </pre>
*/ public boolean hasFileId() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional uint64 fileId = 5 [default = 0]; * *
     * <pre>
     * default as a bogus id
     * </pre>
*/ public long getFileId() { return fileId_; } // repeated string favoredNodes = 6; public static final int FAVOREDNODES_FIELD_NUMBER = 6; private io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringList favoredNodes_; /** * repeated string favoredNodes = 6; * *
     * <pre>
     * the set of datanodes to use for the block
     * </pre>
*/ public java.util.List getFavoredNodesList() { return favoredNodes_; } /** * repeated string favoredNodes = 6; * *
     * <pre>
     * the set of datanodes to use for the block
     * </pre>
*/ public int getFavoredNodesCount() { return favoredNodes_.size(); } /** * repeated string favoredNodes = 6; * *
     * <pre>
     * the set of datanodes to use for the block
     * </pre>
*/ public java.lang.String getFavoredNodes(int index) { return favoredNodes_.get(index); } /** * repeated string favoredNodes = 6; * *
     * <pre>
     * the set of datanodes to use for the block
     * </pre>
*/ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getFavoredNodesBytes(int index) { return favoredNodes_.getByteString(index); } // repeated .hadoop.hdfs.AddBlockFlagProto flags = 7; public static final int FLAGS_FIELD_NUMBER = 7; private java.util.List flags_; /** * repeated .hadoop.hdfs.AddBlockFlagProto flags = 7; * *
     * <pre>
     * default to empty.
     * </pre>
*/ public java.util.List getFlagsList() { return flags_; } /** * repeated .hadoop.hdfs.AddBlockFlagProto flags = 7; * *
     * <pre>
     * default to empty.
     * </pre>
*/ public int getFlagsCount() { return flags_.size(); } /** * repeated .hadoop.hdfs.AddBlockFlagProto flags = 7; * *
     * <pre>
     * default to empty.
     * </pre>
*/ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockFlagProto getFlags(int index) { return flags_.get(index); } private void initFields() { src_ = ""; clientName_ = ""; previous_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); excludeNodes_ = java.util.Collections.emptyList(); fileId_ = 0L; favoredNodes_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY; flags_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } if (!hasClientName()) { memoizedIsInitialized = 0; return false; } if (hasPrevious()) { if (!getPrevious().isInitialized()) { memoizedIsInitialized = 0; return false; } } for (int i = 0; i < getExcludeNodesCount(); i++) { if (!getExcludeNodes(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getClientNameBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeMessage(3, previous_); } for (int i = 0; i < excludeNodes_.size(); i++) { output.writeMessage(4, excludeNodes_.get(i)); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeUInt64(5, fileId_); } for (int i = 0; i < favoredNodes_.size(); i++) { output.writeBytes(6, favoredNodes_.getByteString(i)); } for (int i = 0; i < flags_.size(); i++) { output.writeEnum(7, flags_.get(i).getNumber()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(2, getClientNameBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(3, previous_); } for (int i = 0; i < excludeNodes_.size(); i++) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(4, excludeNodes_.get(i)); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(5, fileId_); } { int dataSize = 0; for (int i = 0; i < favoredNodes_.size(); i++) { dataSize += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSizeNoTag(favoredNodes_.getByteString(i)); } size += dataSize; size += 1 * getFavoredNodesList().size(); } { int dataSize = 0; for (int i = 0; i < flags_.size(); i++) { dataSize += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeEnumSizeNoTag(flags_.get(i).getNumber()); } size += dataSize; size += 1 * flags_.size(); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 
0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && (hasClientName() == other.hasClientName()); if (hasClientName()) { result = result && getClientName() .equals(other.getClientName()); } result = result && (hasPrevious() == other.hasPrevious()); if (hasPrevious()) { result = result && getPrevious() .equals(other.getPrevious()); } result = result && getExcludeNodesList() .equals(other.getExcludeNodesList()); result = result && (hasFileId() == other.hasFileId()); if (hasFileId()) { result = result && (getFileId() == other.getFileId()); } result = result && getFavoredNodesList() .equals(other.getFavoredNodesList()); result = result && getFlagsList() .equals(other.getFlagsList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } if (hasClientName()) { hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER; hash = (53 * hash) + getClientName().hashCode(); } if (hasPrevious()) { hash = (37 * hash) + PREVIOUS_FIELD_NUMBER; hash = (53 * hash) + getPrevious().hashCode(); } if (getExcludeNodesCount() > 0) { hash = (37 * hash) + EXCLUDENODES_FIELD_NUMBER; hash = (53 * hash) + getExcludeNodesList().hashCode(); } if (hasFileId()) { hash = (37 * hash) + FILEID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getFileId()); } if (getFavoredNodesCount() > 0) { hash = (37 * hash) + FAVOREDNODES_FIELD_NUMBER; hash = (53 * hash) + getFavoredNodesList().hashCode(); } if (getFlagsCount() > 0) { hash = (37 * hash) + FLAGS_FIELD_NUMBER; hash = (53 * hash) + hashEnumList(getFlagsList()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return 
PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.AddBlockRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddBlockRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddBlockRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getPreviousFieldBuilder(); getExcludeNodesFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); clientName_ = ""; bitField0_ = (bitField0_ & ~0x00000002); if (previousBuilder_ == null) { previous_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); } else { previousBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); if (excludeNodesBuilder_ == null) { excludeNodes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000008); } else { excludeNodesBuilder_.clear(); } fileId_ = 0L; bitField0_ = (bitField0_ & ~0x00000010); favoredNodes_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000020); flags_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000040); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddBlockRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.clientName_ = clientName_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } if (previousBuilder_ == null) { result.previous_ = previous_; } else { result.previous_ = previousBuilder_.build(); } if (excludeNodesBuilder_ == null) { if (((bitField0_ & 0x00000008) == 0x00000008)) { excludeNodes_ = java.util.Collections.unmodifiableList(excludeNodes_); bitField0_ = (bitField0_ & ~0x00000008); } result.excludeNodes_ = excludeNodes_; } else { 
result.excludeNodes_ = excludeNodesBuilder_.build(); } if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000008; } result.fileId_ = fileId_; if (((bitField0_ & 0x00000020) == 0x00000020)) { favoredNodes_ = new io.prestosql.hadoop.$internal.com.google.protobuf.UnmodifiableLazyStringList( favoredNodes_); bitField0_ = (bitField0_ & ~0x00000020); } result.favoredNodes_ = favoredNodes_; if (((bitField0_ & 0x00000040) == 0x00000040)) { flags_ = java.util.Collections.unmodifiableList(flags_); bitField0_ = (bitField0_ & ~0x00000040); } result.flags_ = flags_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } if (other.hasClientName()) { bitField0_ |= 0x00000002; clientName_ = other.clientName_; onChanged(); } if (other.hasPrevious()) { mergePrevious(other.getPrevious()); } if (excludeNodesBuilder_ == null) { if (!other.excludeNodes_.isEmpty()) { if (excludeNodes_.isEmpty()) { excludeNodes_ = other.excludeNodes_; bitField0_ = (bitField0_ & ~0x00000008); } else { ensureExcludeNodesIsMutable(); excludeNodes_.addAll(other.excludeNodes_); } onChanged(); } } else { if (!other.excludeNodes_.isEmpty()) { if (excludeNodesBuilder_.isEmpty()) { excludeNodesBuilder_.dispose(); excludeNodesBuilder_ = null; excludeNodes_ = other.excludeNodes_; bitField0_ = (bitField0_ & ~0x00000008); excludeNodesBuilder_ = io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
getExcludeNodesFieldBuilder() : null; } else { excludeNodesBuilder_.addAllMessages(other.excludeNodes_); } } } if (other.hasFileId()) { setFileId(other.getFileId()); } if (!other.favoredNodes_.isEmpty()) { if (favoredNodes_.isEmpty()) { favoredNodes_ = other.favoredNodes_; bitField0_ = (bitField0_ & ~0x00000020); } else { ensureFavoredNodesIsMutable(); favoredNodes_.addAll(other.favoredNodes_); } onChanged(); } if (!other.flags_.isEmpty()) { if (flags_.isEmpty()) { flags_ = other.flags_; bitField0_ = (bitField0_ & ~0x00000040); } else { ensureFlagsIsMutable(); flags_.addAll(other.flags_); } onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } if (!hasClientName()) { return false; } if (hasPrevious()) { if (!getPrevious().isInitialized()) { return false; } } for (int i = 0; i < getExcludeNodesCount(); i++) { if (!getExcludeNodes(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // required string clientName = 2; private java.lang.Object clientName_ = ""; /** * required string clientName = 2; */ public boolean hasClientName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string clientName = 
2; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); clientName_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string clientName = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string clientName = 2; */ public Builder setClientName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; clientName_ = value; onChanged(); return this; } /** * required string clientName = 2; */ public Builder clearClientName() { bitField0_ = (bitField0_ & ~0x00000002); clientName_ = getDefaultInstance().getClientName(); onChanged(); return this; } /** * required string clientName = 2; */ public Builder setClientNameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; clientName_ = value; onChanged(); return this; } // optional .hadoop.hdfs.ExtendedBlockProto previous = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto previous_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> previousBuilder_; /** * optional .hadoop.hdfs.ExtendedBlockProto previous = 3; */ public boolean hasPrevious() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional .hadoop.hdfs.ExtendedBlockProto previous = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getPrevious() { if (previousBuilder_ == null) { return previous_; } else { return previousBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.ExtendedBlockProto previous = 3; */ public Builder setPrevious(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { if (previousBuilder_ == null) { if (value == null) { throw new NullPointerException(); } previous_ = value; onChanged(); } else { previousBuilder_.setMessage(value); } bitField0_ |= 0x00000004; return this; } /** * optional .hadoop.hdfs.ExtendedBlockProto previous = 3; */ public Builder setPrevious( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) { if (previousBuilder_ == null) { previous_ = builderForValue.build(); onChanged(); } else { previousBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000004; return this; } /** * optional .hadoop.hdfs.ExtendedBlockProto previous = 3; */ public Builder mergePrevious(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { if (previousBuilder_ == null) { if (((bitField0_ & 0x00000004) == 0x00000004) && previous_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) { 
previous_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(previous_).mergeFrom(value).buildPartial(); } else { previous_ = value; } onChanged(); } else { previousBuilder_.mergeFrom(value); } bitField0_ |= 0x00000004; return this; } /** * optional .hadoop.hdfs.ExtendedBlockProto previous = 3; */ public Builder clearPrevious() { if (previousBuilder_ == null) { previous_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); onChanged(); } else { previousBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); return this; } /** * optional .hadoop.hdfs.ExtendedBlockProto previous = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getPreviousBuilder() { bitField0_ |= 0x00000004; onChanged(); return getPreviousFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.ExtendedBlockProto previous = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getPreviousOrBuilder() { if (previousBuilder_ != null) { return previousBuilder_.getMessageOrBuilder(); } else { return previous_; } } /** * optional .hadoop.hdfs.ExtendedBlockProto previous = 3; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> getPreviousFieldBuilder() { if (previousBuilder_ == null) { previousBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>( previous_, getParentForChildren(), isClean()); previous_ = null; } return previousBuilder_; } // repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; private java.util.List excludeNodes_ = java.util.Collections.emptyList(); private void ensureExcludeNodesIsMutable() { if (!((bitField0_ & 0x00000008) == 0x00000008)) { excludeNodes_ = new java.util.ArrayList(excludeNodes_); bitField0_ |= 0x00000008; } } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> excludeNodesBuilder_; /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public java.util.List getExcludeNodesList() { if (excludeNodesBuilder_ == null) { return java.util.Collections.unmodifiableList(excludeNodes_); } else { return excludeNodesBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public int getExcludeNodesCount() { if (excludeNodesBuilder_ == null) { return excludeNodes_.size(); } else { return excludeNodesBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getExcludeNodes(int index) { if (excludeNodesBuilder_ == null) { return excludeNodes_.get(index); } else { return excludeNodesBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public Builder setExcludeNodes( int index, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (excludeNodesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureExcludeNodesIsMutable(); excludeNodes_.set(index, value); onChanged(); } else { excludeNodesBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public Builder setExcludeNodes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (excludeNodesBuilder_ == null) { ensureExcludeNodesIsMutable(); excludeNodes_.set(index, builderForValue.build()); onChanged(); } else { excludeNodesBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public Builder addExcludeNodes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (excludeNodesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureExcludeNodesIsMutable(); excludeNodes_.add(value); onChanged(); } else { excludeNodesBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public Builder addExcludeNodes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (excludeNodesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureExcludeNodesIsMutable(); excludeNodes_.add(index, value); onChanged(); } else { excludeNodesBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public Builder addExcludeNodes( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (excludeNodesBuilder_ == null) { ensureExcludeNodesIsMutable(); excludeNodes_.add(builderForValue.build()); onChanged(); } else { excludeNodesBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public Builder addExcludeNodes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (excludeNodesBuilder_ == null) { ensureExcludeNodesIsMutable(); excludeNodes_.add(index, builderForValue.build()); onChanged(); } else { excludeNodesBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public Builder addAllExcludeNodes( java.lang.Iterable values) { if (excludeNodesBuilder_ == null) { ensureExcludeNodesIsMutable(); super.addAll(values, excludeNodes_); onChanged(); } else { excludeNodesBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public Builder clearExcludeNodes() { if (excludeNodesBuilder_ == null) { excludeNodes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000008); onChanged(); } else { excludeNodesBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public Builder removeExcludeNodes(int index) { if (excludeNodesBuilder_ == null) { ensureExcludeNodesIsMutable(); excludeNodes_.remove(index); onChanged(); } else { excludeNodesBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getExcludeNodesBuilder( int index) { return getExcludeNodesFieldBuilder().getBuilder(index); } 
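      // A minimal end-to-end builder sketch (all literal values are hypothetical,
      // chosen only to illustrate the two required fields, src and clientName):
      //   AddBlockRequestProto req = AddBlockRequestProto.newBuilder()
      //       .setSrc("/user/alice/data.txt")
      //       .setClientName("DFSClient_NONMAPREDUCE_123_1")
      //       .setFileId(16386L)
      //       .build();
      // build() throws if a required field is unset; buildPartial() skips that check.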
/** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getExcludeNodesOrBuilder( int index) { if (excludeNodesBuilder_ == null) { return excludeNodes_.get(index); } else { return excludeNodesBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public java.util.List getExcludeNodesOrBuilderList() { if (excludeNodesBuilder_ != null) { return excludeNodesBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(excludeNodes_); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addExcludeNodesBuilder() { return getExcludeNodesFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addExcludeNodesBuilder( int index) { return getExcludeNodesFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludeNodes = 4; */ public java.util.List getExcludeNodesBuilderList() { return getExcludeNodesFieldBuilder().getBuilderList(); } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> getExcludeNodesFieldBuilder() { if (excludeNodesBuilder_ == null) { excludeNodesBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>( excludeNodes_, ((bitField0_ & 0x00000008) == 0x00000008), getParentForChildren(), isClean()); excludeNodes_ = null; } return excludeNodesBuilder_; } // optional uint64 fileId = 5 [default = 0]; private long fileId_ ; /** * optional uint64 fileId = 5 [default = 0]; * *
       * <pre>
       * default as a bogus id
       * </pre>
*/ public boolean hasFileId() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional uint64 fileId = 5 [default = 0]; * *
       * <pre>
       * default as a bogus id
       * </pre>
*/ public long getFileId() { return fileId_; } /** * optional uint64 fileId = 5 [default = 0]; * *
       * <pre>
       * default as a bogus id
       * </pre>
*/ public Builder setFileId(long value) { bitField0_ |= 0x00000010; fileId_ = value; onChanged(); return this; } /** * optional uint64 fileId = 5 [default = 0]; * *
       * <pre>
       * default as a bogus id
       * </pre>
*/ public Builder clearFileId() { bitField0_ = (bitField0_ & ~0x00000010); fileId_ = 0L; onChanged(); return this; } // repeated string favoredNodes = 6; private io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringList favoredNodes_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY; private void ensureFavoredNodesIsMutable() { if (!((bitField0_ & 0x00000020) == 0x00000020)) { favoredNodes_ = new io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList(favoredNodes_); bitField0_ |= 0x00000020; } } /** * repeated string favoredNodes = 6; * *
       * <pre>
       * the set of datanodes to use for the block
       * </pre>
*/ public java.util.List getFavoredNodesList() { return java.util.Collections.unmodifiableList(favoredNodes_); } /** * repeated string favoredNodes = 6; * *
       * <pre>
       * the set of datanodes to use for the block
       * </pre>
*/ public int getFavoredNodesCount() { return favoredNodes_.size(); } /** * repeated string favoredNodes = 6; * *
       * <pre>
       * the set of datanodes to use for the block
       * </pre>
*/ public java.lang.String getFavoredNodes(int index) { return favoredNodes_.get(index); } /** * repeated string favoredNodes = 6; * *
       * <pre>
       * the set of datanodes to use for the block
       * </pre>
*/ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getFavoredNodesBytes(int index) { return favoredNodes_.getByteString(index); } /** * repeated string favoredNodes = 6; * *
       * <pre>
       * the set of datanodes to use for the block
       * </pre>
*/ public Builder setFavoredNodes( int index, java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureFavoredNodesIsMutable(); favoredNodes_.set(index, value); onChanged(); return this; } /** * repeated string favoredNodes = 6; * *
       * <pre>
       *the set of datanodes to use for the block
       * </pre>
*/ public Builder addFavoredNodes( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureFavoredNodesIsMutable(); favoredNodes_.add(value); onChanged(); return this; } /** * repeated string favoredNodes = 6; * *
       * <pre>
       *the set of datanodes to use for the block
       * </pre>
*/ public Builder addAllFavoredNodes( java.lang.Iterable values) { ensureFavoredNodesIsMutable(); super.addAll(values, favoredNodes_); onChanged(); return this; } /** * repeated string favoredNodes = 6; * *
       * <pre>
       *the set of datanodes to use for the block
       * </pre>
*/ public Builder clearFavoredNodes() { favoredNodes_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000020); onChanged(); return this; } /** * repeated string favoredNodes = 6; * *
       * <pre>
       *the set of datanodes to use for the block
       * </pre>
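       *
       * Editor's note, not part of the generated source: this Bytes variant
       * appears to store the raw ByteString without UTF-8 validation, while
       * the String accessors above decode it lazily on first access.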
*/ public Builder addFavoredNodesBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } ensureFavoredNodesIsMutable(); favoredNodes_.add(value); onChanged(); return this; } // repeated .hadoop.hdfs.AddBlockFlagProto flags = 7; private java.util.List flags_ = java.util.Collections.emptyList(); private void ensureFlagsIsMutable() { if (!((bitField0_ & 0x00000040) == 0x00000040)) { flags_ = new java.util.ArrayList(flags_); bitField0_ |= 0x00000040; } } /** * repeated .hadoop.hdfs.AddBlockFlagProto flags = 7; * *
       * <pre>
       * default to empty.
       * </pre>
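       *
       * Editor's note, not part of the generated source: a sketch of adding a
       * flag; NO_LOCAL_WRITE is the AddBlockFlagProto value declared earlier
       * in this file.
       * <pre>
       * builder.addFlags(AddBlockFlagProto.NO_LOCAL_WRITE);
       * </pre>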
*/ public java.util.List getFlagsList() { return java.util.Collections.unmodifiableList(flags_); } /** * repeated .hadoop.hdfs.AddBlockFlagProto flags = 7; * *
       * <pre>
       * default to empty.
       * </pre>
*/ public int getFlagsCount() { return flags_.size(); } /** * repeated .hadoop.hdfs.AddBlockFlagProto flags = 7; * *
       * <pre>
       * default to empty.
       * </pre>
*/ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockFlagProto getFlags(int index) { return flags_.get(index); } /** * repeated .hadoop.hdfs.AddBlockFlagProto flags = 7; * *
       * <pre>
       * default to empty.
       * </pre>
*/ public Builder setFlags( int index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockFlagProto value) { if (value == null) { throw new NullPointerException(); } ensureFlagsIsMutable(); flags_.set(index, value); onChanged(); return this; } /** * repeated .hadoop.hdfs.AddBlockFlagProto flags = 7; * *
       * <pre>
       * default to empty.
       * </pre>
*/ public Builder addFlags(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockFlagProto value) { if (value == null) { throw new NullPointerException(); } ensureFlagsIsMutable(); flags_.add(value); onChanged(); return this; } /** * repeated .hadoop.hdfs.AddBlockFlagProto flags = 7; * *
       * <pre>
       * default to empty.
       * </pre>
*/ public Builder addAllFlags( java.lang.Iterable values) { ensureFlagsIsMutable(); super.addAll(values, flags_); onChanged(); return this; } /** * repeated .hadoop.hdfs.AddBlockFlagProto flags = 7; * *
       * <pre>
       * default to empty.
       * </pre>
*/ public Builder clearFlags() { flags_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000040); onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AddBlockRequestProto) } static { defaultInstance = new AddBlockRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.AddBlockRequestProto) } public interface AddBlockResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.LocatedBlockProto block = 1; /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ boolean hasBlock(); /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock(); /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.AddBlockResponseProto} */ public static final class AddBlockResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements AddBlockResponseProtoOrBuilder { // Use AddBlockResponseProto.newBuilder() to construct. private AddBlockResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private AddBlockResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final AddBlockResponseProto defaultInstance; public static AddBlockResponseProto getDefaultInstance() { return defaultInstance; } public AddBlockResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private AddBlockResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = block_.toBuilder(); } block_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(block_); block_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = 
unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddBlockResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddBlockResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public AddBlockResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new AddBlockResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.LocatedBlockProto block = 1; public static final int BLOCK_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto block_; /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public boolean hasBlock() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock() { return block_; } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder() { return block_; } private void initFields() { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasBlock()) { memoizedIsInitialized = 0; return false; } if (!getBlock().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, block_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, block_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) 
{ if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto) obj; boolean result = true; result = result && (hasBlock() == other.hasBlock()); if (hasBlock()) { result = result && getBlock() .equals(other.getBlock()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasBlock()) { hash = (37 * hash) + BLOCK_FIELD_NUMBER; hash = (53 * hash) + getBlock().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto parseFrom( 
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.AddBlockResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddBlockResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddBlockResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getBlockFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (blockBuilder_ == null) { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); } else { blockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddBlockResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto.getDefaultInstance(); } public 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (blockBuilder_ == null) { result.block_ = block_; } else { result.block_ = blockBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto.getDefaultInstance()) return this; if (other.hasBlock()) { mergeBlock(other.getBlock()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasBlock()) { return false; } if (!getBlock().isInitialized()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.LocatedBlockProto block = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blockBuilder_; /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public boolean hasBlock() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock() { if (blockBuilder_ == null) { return block_; } else { return blockBuilder_.getMessage(); } } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ 
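      // Editor's note, not part of the generated source: a hedged round-trip
      // sketch for this message; locatedBlock stands in for a fully built
      // HdfsProtos.LocatedBlockProto, whose construction is elided here.
      //
      //   AddBlockResponseProto resp = AddBlockResponseProto.newBuilder()
      //       .setBlock(locatedBlock)
      //       .build();
      //   byte[] wire = resp.toByteArray();
      //   AddBlockResponseProto parsed = AddBlockResponseProto.parseFrom(wire);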
public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (blockBuilder_ == null) { if (value == null) { throw new NullPointerException(); } block_ = value; onChanged(); } else { blockBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public Builder setBlock( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { if (blockBuilder_ == null) { block_ = builderForValue.build(); onChanged(); } else { blockBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (blockBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); } else { block_ = value; } onChanged(); } else { blockBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public Builder clearBlock() { if (blockBuilder_ == null) { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); onChanged(); } else { blockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlockBuilder() { bitField0_ |= 0x00000001; onChanged(); return getBlockFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder() { if (blockBuilder_ != null) { return blockBuilder_.getMessageOrBuilder(); } else { return block_; } } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> getBlockFieldBuilder() { if (blockBuilder_ == null) { blockBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>( block_, getParentForChildren(), isClean()); block_ = null; } return blockBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AddBlockResponseProto) } static { defaultInstance = new AddBlockResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.AddBlockResponseProto) } public interface GetAdditionalDatanodeRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; */ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); // required 
.hadoop.hdfs.ExtendedBlockProto blk = 2; /** * required .hadoop.hdfs.ExtendedBlockProto blk = 2; */ boolean hasBlk(); /** * required .hadoop.hdfs.ExtendedBlockProto blk = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlk(); /** * required .hadoop.hdfs.ExtendedBlockProto blk = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlkOrBuilder(); // repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ java.util.List getExistingsList(); /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getExistings(int index); /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ int getExistingsCount(); /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ java.util.List getExistingsOrBuilderList(); /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getExistingsOrBuilder( int index); // repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ java.util.List getExcludesList(); /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getExcludes(int index); /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ int getExcludesCount(); /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ java.util.List getExcludesOrBuilderList(); /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getExcludesOrBuilder( int index); // required uint32 numAdditionalNodes = 5; /** * required uint32 numAdditionalNodes = 5; */ boolean hasNumAdditionalNodes(); /** * required uint32 numAdditionalNodes = 5; */ int getNumAdditionalNodes(); // required string clientName = 6; /** * required string clientName = 6; */ boolean hasClientName(); /** * required string clientName = 6; */ java.lang.String getClientName(); /** * required string clientName = 6; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes(); // repeated string existingStorageUuids = 7; /** * repeated string existingStorageUuids = 7; */ java.util.List getExistingStorageUuidsList(); /** * repeated string existingStorageUuids = 7; */ int getExistingStorageUuidsCount(); /** * repeated string existingStorageUuids = 7; */ java.lang.String getExistingStorageUuids(int index); /** * repeated string existingStorageUuids = 7; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getExistingStorageUuidsBytes(int index); // optional uint64 fileId = 8 [default = 0]; /** * optional uint64 fileId = 8 [default = 0]; * *
      * <pre>
      * default to GRANDFATHER_INODE_ID
      * </pre>
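      *
      * Editor's note, not part of the generated source: GRANDFATHER_INODE_ID
      * is the value 0 in org.apache.hadoop.hdfs.server.namenode.INodeId; it
      * marks requests from clients that identify the file by path alone, so
      * the namenode resolves the path instead of an inode id.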
*/ boolean hasFileId(); /** * optional uint64 fileId = 8 [default = 0]; * *
      * <pre>
      * default to GRANDFATHER_INODE_ID
      * </pre>
*/ long getFileId(); } /** * Protobuf type {@code hadoop.hdfs.GetAdditionalDatanodeRequestProto} */ public static final class GetAdditionalDatanodeRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetAdditionalDatanodeRequestProtoOrBuilder { // Use GetAdditionalDatanodeRequestProto.newBuilder() to construct. private GetAdditionalDatanodeRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetAdditionalDatanodeRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetAdditionalDatanodeRequestProto defaultInstance; public static GetAdditionalDatanodeRequestProto getDefaultInstance() { return defaultInstance; } public GetAdditionalDatanodeRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetAdditionalDatanodeRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } case 18: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = blk_.toBuilder(); } blk_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(blk_); blk_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000002; break; } case 26: { if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { existings_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000004; } existings_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry)); break; } case 34: { if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { excludes_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000008; } excludes_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry)); break; } case 40: { bitField0_ |= 0x00000004; numAdditionalNodes_ = input.readUInt32(); break; } case 50: { bitField0_ |= 0x00000008; clientName_ = input.readBytes(); break; } case 58: { if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { existingStorageUuids_ = new io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00000040; } existingStorageUuids_.add(input.readBytes()); break; } case 64: { bitField0_ |= 0x00000010; fileId_ = input.readUInt64(); break; } } } } catch 
(io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { existings_ = java.util.Collections.unmodifiableList(existings_); } if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { excludes_ = java.util.Collections.unmodifiableList(excludes_); } if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { existingStorageUuids_ = new io.prestosql.hadoop.$internal.com.google.protobuf.UnmodifiableLazyStringList(existingStorageUuids_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetAdditionalDatanodeRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetAdditionalDatanodeRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetAdditionalDatanodeRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetAdditionalDatanodeRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string src = 1; public static final int SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required .hadoop.hdfs.ExtendedBlockProto blk = 2; public static final int BLK_FIELD_NUMBER = 2; private 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto blk_; /** * required .hadoop.hdfs.ExtendedBlockProto blk = 2; */ public boolean hasBlk() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.ExtendedBlockProto blk = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlk() { return blk_; } /** * required .hadoop.hdfs.ExtendedBlockProto blk = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlkOrBuilder() { return blk_; } // repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; public static final int EXISTINGS_FIELD_NUMBER = 3; private java.util.List existings_; /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public java.util.List getExistingsList() { return existings_; } /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public java.util.List getExistingsOrBuilderList() { return existings_; } /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public int getExistingsCount() { return existings_.size(); } /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getExistings(int index) { return existings_.get(index); } /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getExistingsOrBuilder( int index) { return existings_.get(index); } // repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; public static final int EXCLUDES_FIELD_NUMBER = 4; private java.util.List excludes_; /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public java.util.List getExcludesList() { return excludes_; } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public java.util.List getExcludesOrBuilderList() { return excludes_; } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public int getExcludesCount() { return excludes_.size(); } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getExcludes(int index) { return excludes_.get(index); } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getExcludesOrBuilder( int index) { return excludes_.get(index); } // required uint32 numAdditionalNodes = 5; public static final int NUMADDITIONALNODES_FIELD_NUMBER = 5; private int numAdditionalNodes_; /** * required uint32 numAdditionalNodes = 5; */ public boolean hasNumAdditionalNodes() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint32 numAdditionalNodes = 5; */ public int getNumAdditionalNodes() { return numAdditionalNodes_; } // required string clientName = 6; public static final int CLIENTNAME_FIELD_NUMBER = 6; private java.lang.Object clientName_; /** * required string clientName = 6; */ public boolean hasClientName() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required string clientName = 6; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { clientName_ = s; } return s; } } /** * required string clientName = 6; */ public 
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // repeated string existingStorageUuids = 7; public static final int EXISTINGSTORAGEUUIDS_FIELD_NUMBER = 7; private io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringList existingStorageUuids_; /** * repeated string existingStorageUuids = 7; */ public java.util.List getExistingStorageUuidsList() { return existingStorageUuids_; } /** * repeated string existingStorageUuids = 7; */ public int getExistingStorageUuidsCount() { return existingStorageUuids_.size(); } /** * repeated string existingStorageUuids = 7; */ public java.lang.String getExistingStorageUuids(int index) { return existingStorageUuids_.get(index); } /** * repeated string existingStorageUuids = 7; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getExistingStorageUuidsBytes(int index) { return existingStorageUuids_.getByteString(index); } // optional uint64 fileId = 8 [default = 0]; public static final int FILEID_FIELD_NUMBER = 8; private long fileId_; /** * optional uint64 fileId = 8 [default = 0]; * *
      * <pre>
      * default to GRANDFATHER_INODE_ID
      * </pre>
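      *
      * Editor's note, not part of the generated source: a hedged sketch of
      * building this request; extendedBlock and liveReplicas are placeholders
      * for a HdfsProtos.ExtendedBlockProto and a java.util.List of
      * DatanodeInfoProto, and all four required fields must be set.
      * <pre>
      * GetAdditionalDatanodeRequestProto req =
      *     GetAdditionalDatanodeRequestProto.newBuilder()
      *         .setSrc("/tmp/example.txt")
      *         .setBlk(extendedBlock)
      *         .addAllExistings(liveReplicas)
      *         .setNumAdditionalNodes(1)
      *         .setClientName("DFSClient_example")
      *         .build();
      * </pre>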
*/ public boolean hasFileId() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional uint64 fileId = 8 [default = 0]; * *
      * <pre>
      * default to GRANDFATHER_INODE_ID
      * </pre>
*/ public long getFileId() { return fileId_; } private void initFields() { src_ = ""; blk_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); existings_ = java.util.Collections.emptyList(); excludes_ = java.util.Collections.emptyList(); numAdditionalNodes_ = 0; clientName_ = ""; existingStorageUuids_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY; fileId_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } if (!hasBlk()) { memoizedIsInitialized = 0; return false; } if (!hasNumAdditionalNodes()) { memoizedIsInitialized = 0; return false; } if (!hasClientName()) { memoizedIsInitialized = 0; return false; } if (!getBlk().isInitialized()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getExistingsCount(); i++) { if (!getExistings(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } for (int i = 0; i < getExcludesCount(); i++) { if (!getExcludes(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, blk_); } for (int i = 0; i < existings_.size(); i++) { output.writeMessage(3, existings_.get(i)); } for (int i = 0; i < excludes_.size(); i++) { output.writeMessage(4, excludes_.get(i)); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt32(5, numAdditionalNodes_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeBytes(6, getClientNameBytes()); } for (int i = 0; i < existingStorageUuids_.size(); i++) { output.writeBytes(7, existingStorageUuids_.getByteString(i)); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeUInt64(8, fileId_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(2, blk_); } for (int i = 0; i < existings_.size(); i++) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(3, existings_.get(i)); } for (int i = 0; i < excludes_.size(); i++) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(4, excludes_.get(i)); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt32Size(5, numAdditionalNodes_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(6, getClientNameBytes()); } { int dataSize = 0; for (int i = 0; i < existingStorageUuids_.size(); i++) { dataSize += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSizeNoTag(existingStorageUuids_.getByteString(i)); } 
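      // Editor's note, not part of the generated source: the loop above sums
      // each UUID's length-delimited payload; the statements below add that
      // total plus one tag byte per entry, because field 7's tag,
      // (7 << 3) | 2 = 58, fits in a single varint byte. That is the
      // "1 *" factor in the size formula.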
size += dataSize; size += 1 * getExistingStorageUuidsList().size(); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(8, fileId_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && (hasBlk() == other.hasBlk()); if (hasBlk()) { result = result && getBlk() .equals(other.getBlk()); } result = result && getExistingsList() .equals(other.getExistingsList()); result = result && getExcludesList() .equals(other.getExcludesList()); result = result && (hasNumAdditionalNodes() == other.hasNumAdditionalNodes()); if (hasNumAdditionalNodes()) { result = result && (getNumAdditionalNodes() == other.getNumAdditionalNodes()); } result = result && (hasClientName() == other.hasClientName()); if (hasClientName()) { result = result && getClientName() .equals(other.getClientName()); } result = result && getExistingStorageUuidsList() .equals(other.getExistingStorageUuidsList()); result = result && (hasFileId() == other.hasFileId()); if (hasFileId()) { result = result && (getFileId() == other.getFileId()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } if (hasBlk()) { hash = (37 * hash) + BLK_FIELD_NUMBER; hash = (53 * hash) + getBlk().hashCode(); } if (getExistingsCount() > 0) { hash = (37 * hash) + EXISTINGS_FIELD_NUMBER; hash = (53 * hash) + getExistingsList().hashCode(); } if (getExcludesCount() > 0) { hash = (37 * hash) + EXCLUDES_FIELD_NUMBER; hash = (53 * hash) + getExcludesList().hashCode(); } if (hasNumAdditionalNodes()) { hash = (37 * hash) + NUMADDITIONALNODES_FIELD_NUMBER; hash = (53 * hash) + getNumAdditionalNodes(); } if (hasClientName()) { hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER; hash = (53 * hash) + getClientName().hashCode(); } if (getExistingStorageUuidsCount() > 0) { hash = (37 * hash) + EXISTINGSTORAGEUUIDS_FIELD_NUMBER; hash = (53 * hash) + getExistingStorageUuidsList().hashCode(); } if (hasFileId()) { hash = (37 * hash) + FILEID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getFileId()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws 
io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code 
hadoop.hdfs.GetAdditionalDatanodeRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetAdditionalDatanodeRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetAdditionalDatanodeRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getBlkFieldBuilder(); getExistingsFieldBuilder(); getExcludesFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); if (blkBuilder_ == null) { blk_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); } else { blkBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); if (existingsBuilder_ == null) { existings_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000004); } else { existingsBuilder_.clear(); } if (excludesBuilder_ == null) { excludes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000008); } else { excludesBuilder_.clear(); } numAdditionalNodes_ = 0; bitField0_ = (bitField0_ & ~0x00000010); clientName_ = ""; bitField0_ = (bitField0_ & ~0x00000020); existingStorageUuids_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000040); fileId_ = 0L; bitField0_ = (bitField0_ & ~0x00000080); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetAdditionalDatanodeRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto result = buildPartial(); if (!result.isInitialized()) { throw 
newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } if (blkBuilder_ == null) { result.blk_ = blk_; } else { result.blk_ = blkBuilder_.build(); } if (existingsBuilder_ == null) { if (((bitField0_ & 0x00000004) == 0x00000004)) { existings_ = java.util.Collections.unmodifiableList(existings_); bitField0_ = (bitField0_ & ~0x00000004); } result.existings_ = existings_; } else { result.existings_ = existingsBuilder_.build(); } if (excludesBuilder_ == null) { if (((bitField0_ & 0x00000008) == 0x00000008)) { excludes_ = java.util.Collections.unmodifiableList(excludes_); bitField0_ = (bitField0_ & ~0x00000008); } result.excludes_ = excludes_; } else { result.excludes_ = excludesBuilder_.build(); } if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000004; } result.numAdditionalNodes_ = numAdditionalNodes_; if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000008; } result.clientName_ = clientName_; if (((bitField0_ & 0x00000040) == 0x00000040)) { existingStorageUuids_ = new io.prestosql.hadoop.$internal.com.google.protobuf.UnmodifiableLazyStringList( existingStorageUuids_); bitField0_ = (bitField0_ & ~0x00000040); } result.existingStorageUuids_ = existingStorageUuids_; if (((from_bitField0_ & 0x00000080) == 0x00000080)) { to_bitField0_ |= 0x00000010; } result.fileId_ = fileId_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } if (other.hasBlk()) { mergeBlk(other.getBlk()); } if (existingsBuilder_ == null) { if (!other.existings_.isEmpty()) { if (existings_.isEmpty()) { existings_ = other.existings_; bitField0_ = (bitField0_ & ~0x00000004); } else { ensureExistingsIsMutable(); existings_.addAll(other.existings_); } onChanged(); } } else { if (!other.existings_.isEmpty()) { if (existingsBuilder_.isEmpty()) { existingsBuilder_.dispose(); existingsBuilder_ = null; existings_ = other.existings_; bitField0_ = (bitField0_ & ~0x00000004); existingsBuilder_ = io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
getExistingsFieldBuilder() : null; } else { existingsBuilder_.addAllMessages(other.existings_); } } } if (excludesBuilder_ == null) { if (!other.excludes_.isEmpty()) { if (excludes_.isEmpty()) { excludes_ = other.excludes_; bitField0_ = (bitField0_ & ~0x00000008); } else { ensureExcludesIsMutable(); excludes_.addAll(other.excludes_); } onChanged(); } } else { if (!other.excludes_.isEmpty()) { if (excludesBuilder_.isEmpty()) { excludesBuilder_.dispose(); excludesBuilder_ = null; excludes_ = other.excludes_; bitField0_ = (bitField0_ & ~0x00000008); excludesBuilder_ = io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getExcludesFieldBuilder() : null; } else { excludesBuilder_.addAllMessages(other.excludes_); } } } if (other.hasNumAdditionalNodes()) { setNumAdditionalNodes(other.getNumAdditionalNodes()); } if (other.hasClientName()) { bitField0_ |= 0x00000020; clientName_ = other.clientName_; onChanged(); } if (!other.existingStorageUuids_.isEmpty()) { if (existingStorageUuids_.isEmpty()) { existingStorageUuids_ = other.existingStorageUuids_; bitField0_ = (bitField0_ & ~0x00000040); } else { ensureExistingStorageUuidsIsMutable(); existingStorageUuids_.addAll(other.existingStorageUuids_); } onChanged(); } if (other.hasFileId()) { setFileId(other.getFileId()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } if (!hasBlk()) { return false; } if (!hasNumAdditionalNodes()) { return false; } if (!hasClientName()) { return false; } if (!getBlk().isInitialized()) { return false; } for (int i = 0; i < getExistingsCount(); i++) { if (!getExistings(i).isInitialized()) { return false; } } for (int i = 0; i < getExcludesCount(); i++) { if (!getExcludes(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) 
ref; } } /** * required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // required .hadoop.hdfs.ExtendedBlockProto blk = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto blk_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blkBuilder_; /** * required .hadoop.hdfs.ExtendedBlockProto blk = 2; */ public boolean hasBlk() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.ExtendedBlockProto blk = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlk() { if (blkBuilder_ == null) { return blk_; } else { return blkBuilder_.getMessage(); } } /** * required .hadoop.hdfs.ExtendedBlockProto blk = 2; */ public Builder setBlk(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { if (blkBuilder_ == null) { if (value == null) { throw new NullPointerException(); } blk_ = value; onChanged(); } else { blkBuilder_.setMessage(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.ExtendedBlockProto blk = 2; */ public Builder setBlk( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) { if (blkBuilder_ == null) { blk_ = builderForValue.build(); onChanged(); } else { blkBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.ExtendedBlockProto blk = 2; */ public Builder mergeBlk(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { if (blkBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002) && blk_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) { blk_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(blk_).mergeFrom(value).buildPartial(); } else { blk_ = value; } onChanged(); } else { blkBuilder_.mergeFrom(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.ExtendedBlockProto blk = 2; */ public Builder clearBlk() { if (blkBuilder_ == null) { blk_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); onChanged(); } else { blkBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } /** * required .hadoop.hdfs.ExtendedBlockProto blk = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlkBuilder() { bitField0_ |= 0x00000002; onChanged(); return getBlkFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.ExtendedBlockProto blk = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlkOrBuilder() { 
if (blkBuilder_ != null) { return blkBuilder_.getMessageOrBuilder(); } else { return blk_; } } /** * required .hadoop.hdfs.ExtendedBlockProto blk = 2; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> getBlkFieldBuilder() { if (blkBuilder_ == null) { blkBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>( blk_, getParentForChildren(), isClean()); blk_ = null; } return blkBuilder_; } // repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; private java.util.List existings_ = java.util.Collections.emptyList(); private void ensureExistingsIsMutable() { if (!((bitField0_ & 0x00000004) == 0x00000004)) { existings_ = new java.util.ArrayList(existings_); bitField0_ |= 0x00000004; } } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> existingsBuilder_; /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public java.util.List getExistingsList() { if (existingsBuilder_ == null) { return java.util.Collections.unmodifiableList(existings_); } else { return existingsBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public int getExistingsCount() { if (existingsBuilder_ == null) { return existings_.size(); } else { return existingsBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getExistings(int index) { if (existingsBuilder_ == null) { return existings_.get(index); } else { return existingsBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public Builder setExistings( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (existingsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureExistingsIsMutable(); existings_.set(index, value); onChanged(); } else { existingsBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public Builder setExistings( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (existingsBuilder_ == null) { ensureExistingsIsMutable(); existings_.set(index, builderForValue.build()); onChanged(); } else { existingsBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public Builder addExistings(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (existingsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureExistingsIsMutable(); existings_.add(value); onChanged(); } else { existingsBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public Builder 
addExistings( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (existingsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureExistingsIsMutable(); existings_.add(index, value); onChanged(); } else { existingsBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public Builder addExistings( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (existingsBuilder_ == null) { ensureExistingsIsMutable(); existings_.add(builderForValue.build()); onChanged(); } else { existingsBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public Builder addExistings( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (existingsBuilder_ == null) { ensureExistingsIsMutable(); existings_.add(index, builderForValue.build()); onChanged(); } else { existingsBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public Builder addAllExistings( java.lang.Iterable values) { if (existingsBuilder_ == null) { ensureExistingsIsMutable(); super.addAll(values, existings_); onChanged(); } else { existingsBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public Builder clearExistings() { if (existingsBuilder_ == null) { existings_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000004); onChanged(); } else { existingsBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public Builder removeExistings(int index) { if (existingsBuilder_ == null) { ensureExistingsIsMutable(); existings_.remove(index); onChanged(); } else { existingsBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getExistingsBuilder( int index) { return getExistingsFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getExistingsOrBuilder( int index) { if (existingsBuilder_ == null) { return existings_.get(index); } else { return existingsBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public java.util.List getExistingsOrBuilderList() { if (existingsBuilder_ != null) { return existingsBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(existings_); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addExistingsBuilder() { return getExistingsFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addExistingsBuilder( int index) { return getExistingsFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.DatanodeInfoProto existings = 3; */ public java.util.List getExistingsBuilderList() { return 
getExistingsFieldBuilder().getBuilderList(); } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> getExistingsFieldBuilder() { if (existingsBuilder_ == null) { existingsBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>( existings_, ((bitField0_ & 0x00000004) == 0x00000004), getParentForChildren(), isClean()); existings_ = null; } return existingsBuilder_; } // repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; private java.util.List excludes_ = java.util.Collections.emptyList(); private void ensureExcludesIsMutable() { if (!((bitField0_ & 0x00000008) == 0x00000008)) { excludes_ = new java.util.ArrayList(excludes_); bitField0_ |= 0x00000008; } } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> excludesBuilder_; /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public java.util.List getExcludesList() { if (excludesBuilder_ == null) { return java.util.Collections.unmodifiableList(excludes_); } else { return excludesBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public int getExcludesCount() { if (excludesBuilder_ == null) { return excludes_.size(); } else { return excludesBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getExcludes(int index) { if (excludesBuilder_ == null) { return excludes_.get(index); } else { return excludesBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public Builder setExcludes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (excludesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureExcludesIsMutable(); excludes_.set(index, value); onChanged(); } else { excludesBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public Builder setExcludes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (excludesBuilder_ == null) { ensureExcludesIsMutable(); excludes_.set(index, builderForValue.build()); onChanged(); } else { excludesBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public Builder addExcludes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (excludesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureExcludesIsMutable(); excludes_.add(value); onChanged(); } else { excludesBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public Builder addExcludes( int index, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (excludesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureExcludesIsMutable(); excludes_.add(index, value); onChanged(); } else { excludesBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public Builder addExcludes( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (excludesBuilder_ == null) { ensureExcludesIsMutable(); excludes_.add(builderForValue.build()); onChanged(); } else { excludesBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public Builder addExcludes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (excludesBuilder_ == null) { ensureExcludesIsMutable(); excludes_.add(index, builderForValue.build()); onChanged(); } else { excludesBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public Builder addAllExcludes( java.lang.Iterable values) { if (excludesBuilder_ == null) { ensureExcludesIsMutable(); super.addAll(values, excludes_); onChanged(); } else { excludesBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public Builder clearExcludes() { if (excludesBuilder_ == null) { excludes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000008); onChanged(); } else { excludesBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public Builder removeExcludes(int index) { if (excludesBuilder_ == null) { ensureExcludesIsMutable(); excludes_.remove(index); onChanged(); } else { excludesBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getExcludesBuilder( int index) { return getExcludesFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getExcludesOrBuilder( int index) { if (excludesBuilder_ == null) { return excludes_.get(index); } else { return excludesBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public java.util.List getExcludesOrBuilderList() { if (excludesBuilder_ != null) { return excludesBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(excludes_); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addExcludesBuilder() { return getExcludesFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addExcludesBuilder( int index) { return getExcludesFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.DatanodeInfoProto excludes = 4; */ public java.util.List getExcludesBuilderList() { return getExcludesFieldBuilder().getBuilderList(); } private 
io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> getExcludesFieldBuilder() { if (excludesBuilder_ == null) { excludesBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>( excludes_, ((bitField0_ & 0x00000008) == 0x00000008), getParentForChildren(), isClean()); excludes_ = null; } return excludesBuilder_; } // required uint32 numAdditionalNodes = 5; private int numAdditionalNodes_ ; /** * required uint32 numAdditionalNodes = 5; */ public boolean hasNumAdditionalNodes() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required uint32 numAdditionalNodes = 5; */ public int getNumAdditionalNodes() { return numAdditionalNodes_; } /** * required uint32 numAdditionalNodes = 5; */ public Builder setNumAdditionalNodes(int value) { bitField0_ |= 0x00000010; numAdditionalNodes_ = value; onChanged(); return this; } /** * required uint32 numAdditionalNodes = 5; */ public Builder clearNumAdditionalNodes() { bitField0_ = (bitField0_ & ~0x00000010); numAdditionalNodes_ = 0; onChanged(); return this; } // required string clientName = 6; private java.lang.Object clientName_ = ""; /** * required string clientName = 6; */ public boolean hasClientName() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * required string clientName = 6; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); clientName_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string clientName = 6; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string clientName = 6; */ public Builder setClientName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000020; clientName_ = value; onChanged(); return this; } /** * required string clientName = 6; */ public Builder clearClientName() { bitField0_ = (bitField0_ & ~0x00000020); clientName_ = getDefaultInstance().getClientName(); onChanged(); return this; } /** * required string clientName = 6; */ public Builder setClientNameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000020; clientName_ = value; onChanged(); return this; } // repeated string existingStorageUuids = 7; private io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringList existingStorageUuids_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY; private void ensureExistingStorageUuidsIsMutable() { if (!((bitField0_ & 0x00000040) == 0x00000040)) 
{ existingStorageUuids_ = new io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList(existingStorageUuids_); bitField0_ |= 0x00000040; } } /** * repeated string existingStorageUuids = 7; */ public java.util.List getExistingStorageUuidsList() { return java.util.Collections.unmodifiableList(existingStorageUuids_); } /** * repeated string existingStorageUuids = 7; */ public int getExistingStorageUuidsCount() { return existingStorageUuids_.size(); } /** * repeated string existingStorageUuids = 7; */ public java.lang.String getExistingStorageUuids(int index) { return existingStorageUuids_.get(index); } /** * repeated string existingStorageUuids = 7; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getExistingStorageUuidsBytes(int index) { return existingStorageUuids_.getByteString(index); } /** * repeated string existingStorageUuids = 7; */ public Builder setExistingStorageUuids( int index, java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureExistingStorageUuidsIsMutable(); existingStorageUuids_.set(index, value); onChanged(); return this; } /** * repeated string existingStorageUuids = 7; */ public Builder addExistingStorageUuids( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureExistingStorageUuidsIsMutable(); existingStorageUuids_.add(value); onChanged(); return this; } /** * repeated string existingStorageUuids = 7; */ public Builder addAllExistingStorageUuids( java.lang.Iterable values) { ensureExistingStorageUuidsIsMutable(); super.addAll(values, existingStorageUuids_); onChanged(); return this; } /** * repeated string existingStorageUuids = 7; */ public Builder clearExistingStorageUuids() { existingStorageUuids_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000040); onChanged(); return this; } /** * repeated string existingStorageUuids = 7; */ public Builder addExistingStorageUuidsBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } ensureExistingStorageUuidsIsMutable(); existingStorageUuids_.add(value); onChanged(); return this; } // optional uint64 fileId = 8 [default = 0]; private long fileId_ ; /** * optional uint64 fileId = 8 [default = 0]; * *
       * default to GRANDFATHER_INODE_ID
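       *
       * <p>Illustrative presence-check sketch (not part of the generated
       * javadoc; {@code req} stands for a hypothetical parsed request):
       * <pre>
       * if (req.hasFileId()) {
       *   long id = req.getFileId();  // sender set the field explicitly
       * } else {
       *   // field unset: getFileId() returns the declared default of 0
       * }
       * </pre>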
*/ public boolean hasFileId() { return ((bitField0_ & 0x00000080) == 0x00000080); } /** * optional uint64 fileId = 8 [default = 0]; * *
       * default to GRANDFATHER_INODE_ID
*/ public long getFileId() { return fileId_; } /** * optional uint64 fileId = 8 [default = 0]; * *
       * default to GRANDFATHER_INODE_ID
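       *
       * <p>Illustrative merge sketch (not part of the generated javadoc;
       * {@code req} and {@code otherReq} are hypothetical instances). Merging
       * follows the standard protobuf rules visible in mergeFrom above: set
       * scalar fields in the source overwrite the target, repeated fields
       * are appended:
       * <pre>
       * GetAdditionalDatanodeRequestProto merged =
       *     req.toBuilder().mergeFrom(otherReq).build();
       * </pre>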
*/ public Builder setFileId(long value) { bitField0_ |= 0x00000080; fileId_ = value; onChanged(); return this; } /** * optional uint64 fileId = 8 [default = 0]; * *
       * default to GRANDFATHER_INODE_ID
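       *
       * <p>Illustrative builder sketch (not part of the generated javadoc;
       * {@code blk}, {@code existingNodes}, {@code excludedNodes} and
       * {@code fileId} are placeholder values):
       * <pre>
       * GetAdditionalDatanodeRequestProto req =
       *     GetAdditionalDatanodeRequestProto.newBuilder()
       *         .setSrc("/user/example/file")        // required
       *         .setBlk(blk)                         // required ExtendedBlockProto
       *         .addAllExistings(existingNodes)      // repeated DatanodeInfoProto
       *         .addAllExcludes(excludedNodes)       // repeated DatanodeInfoProto
       *         .setNumAdditionalNodes(1)            // required
       *         .setClientName("DFSClient_example")  // required
       *         .setFileId(fileId)                   // optional, defaults to 0
       *         .build();  // throws if any required field is unset;
       *                    // buildPartial() skips that check
       * </pre>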
*/ public Builder clearFileId() { bitField0_ = (bitField0_ & ~0x00000080); fileId_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetAdditionalDatanodeRequestProto) } static { defaultInstance = new GetAdditionalDatanodeRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetAdditionalDatanodeRequestProto) } public interface GetAdditionalDatanodeResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.LocatedBlockProto block = 1; /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ boolean hasBlock(); /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock(); /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.GetAdditionalDatanodeResponseProto} */ public static final class GetAdditionalDatanodeResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetAdditionalDatanodeResponseProtoOrBuilder { // Use GetAdditionalDatanodeResponseProto.newBuilder() to construct. private GetAdditionalDatanodeResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetAdditionalDatanodeResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetAdditionalDatanodeResponseProto defaultInstance; public static GetAdditionalDatanodeResponseProto getDefaultInstance() { return defaultInstance; } public GetAdditionalDatanodeResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetAdditionalDatanodeResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = block_.toBuilder(); } block_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(block_); block_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new 
io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetAdditionalDatanodeResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetAdditionalDatanodeResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetAdditionalDatanodeResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetAdditionalDatanodeResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.LocatedBlockProto block = 1; public static final int BLOCK_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto block_; /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public boolean hasBlock() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock() { return block_; } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder() { return block_; } private void initFields() { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasBlock()) { memoizedIsInitialized = 0; return false; } if (!getBlock().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, block_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, block_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private 
static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto) obj; boolean result = true; result = result && (hasBlock() == other.hasBlock()); if (hasBlock()) { result = result && getBlock() .equals(other.getBlock()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasBlock()) { hash = (37 * hash) + BLOCK_FIELD_NUMBER; hash = (53 * hash) + getBlock().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetAdditionalDatanodeResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetAdditionalDatanodeResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetAdditionalDatanodeResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getBlockFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (blockBuilder_ == null) { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); } else { blockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return 
create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetAdditionalDatanodeResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (blockBuilder_ == null) { result.block_ = block_; } else { result.block_ = blockBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto.getDefaultInstance()) return this; if (other.hasBlock()) { mergeBlock(other.getBlock()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasBlock()) { return false; } if (!getBlock().isInitialized()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.LocatedBlockProto block = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); private 
io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blockBuilder_; /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public boolean hasBlock() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock() { if (blockBuilder_ == null) { return block_; } else { return blockBuilder_.getMessage(); } } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (blockBuilder_ == null) { if (value == null) { throw new NullPointerException(); } block_ = value; onChanged(); } else { blockBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public Builder setBlock( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { if (blockBuilder_ == null) { block_ = builderForValue.build(); onChanged(); } else { blockBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (blockBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); } else { block_ = value; } onChanged(); } else { blockBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public Builder clearBlock() { if (blockBuilder_ == null) { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); onChanged(); } else { blockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlockBuilder() { bitField0_ |= 0x00000001; onChanged(); return getBlockFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder() { if (blockBuilder_ != null) { return blockBuilder_.getMessageOrBuilder(); } else { return block_; } } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> getBlockFieldBuilder() { if (blockBuilder_ == null) { blockBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>( 
block_, getParentForChildren(), isClean()); block_ = null; } return blockBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetAdditionalDatanodeResponseProto) } static { defaultInstance = new GetAdditionalDatanodeResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetAdditionalDatanodeResponseProto) } public interface CompleteRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; */ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); // required string clientName = 2; /** * required string clientName = 2; */ boolean hasClientName(); /** * required string clientName = 2; */ java.lang.String getClientName(); /** * required string clientName = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes(); // optional .hadoop.hdfs.ExtendedBlockProto last = 3; /** * optional .hadoop.hdfs.ExtendedBlockProto last = 3; */ boolean hasLast(); /** * optional .hadoop.hdfs.ExtendedBlockProto last = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getLast(); /** * optional .hadoop.hdfs.ExtendedBlockProto last = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getLastOrBuilder(); // optional uint64 fileId = 4 [default = 0]; /** * optional uint64 fileId = 4 [default = 0]; * *
     * default to GRANDFATHER_INODE_ID
*/ boolean hasFileId(); /** * optional uint64 fileId = 4 [default = 0]; * *
     * default to GRANDFATHER_INODE_ID
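     *
     * <p>Illustrative builder sketch (not part of the generated javadoc;
     * {@code lastBlock} and {@code fileId} are placeholder values):
     * <pre>
     * CompleteRequestProto req = CompleteRequestProto.newBuilder()
     *     .setSrc("/user/example/file")        // required
     *     .setClientName("DFSClient_example")  // required
     *     .setLast(lastBlock)                  // optional ExtendedBlockProto
     *     .setFileId(fileId)                   // optional, defaults to 0
     *     .build();
     * </pre>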
*/ long getFileId(); } /** * Protobuf type {@code hadoop.hdfs.CompleteRequestProto} */ public static final class CompleteRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements CompleteRequestProtoOrBuilder { // Use CompleteRequestProto.newBuilder() to construct. private CompleteRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private CompleteRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final CompleteRequestProto defaultInstance; public static CompleteRequestProto getDefaultInstance() { return defaultInstance; } public CompleteRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CompleteRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; clientName_ = input.readBytes(); break; } case 26: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = null; if (((bitField0_ & 0x00000004) == 0x00000004)) { subBuilder = last_.toBuilder(); } last_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(last_); last_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000004; break; } case 32: { bitField0_ |= 0x00000008; fileId_ = input.readUInt64(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CompleteRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CompleteRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto.class, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public CompleteRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new CompleteRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string src = 1; public static final int SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required string clientName = 2; public static final int CLIENTNAME_FIELD_NUMBER = 2; private java.lang.Object clientName_; /** * required string clientName = 2; */ public boolean hasClientName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string clientName = 2; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { clientName_ = s; } return s; } } /** * required string clientName = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // optional .hadoop.hdfs.ExtendedBlockProto last = 3; public static final int LAST_FIELD_NUMBER = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto last_; /** * optional .hadoop.hdfs.ExtendedBlockProto last = 3; */ public boolean hasLast() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional .hadoop.hdfs.ExtendedBlockProto last = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getLast() { return last_; } /** * optional .hadoop.hdfs.ExtendedBlockProto last = 
3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getLastOrBuilder() { return last_; } // optional uint64 fileId = 4 [default = 0]; public static final int FILEID_FIELD_NUMBER = 4; private long fileId_; /** * optional uint64 fileId = 4 [default = 0]; * *
     * <pre>
     * default to GRANDFATHER_INODE_ID
     * </pre>
*/ public boolean hasFileId() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional uint64 fileId = 4 [default = 0]; * *
     * <pre>
     * default to GRANDFATHER_INODE_ID
     * </pre>
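     *
     * Illustrative note, not part of the protoc output: a client that never
     * called setFileId() leaves this field at the default 0, which HDFS
     * interprets as the grandfathered (pre-fileId) inode id. A hypothetical
     * caller holding a parsed request {@code req} might read it as:
     * <pre>
     * long inodeId = req.hasFileId() ? req.getFileId() : 0L;
     * </pre>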
*/ public long getFileId() { return fileId_; } private void initFields() { src_ = ""; clientName_ = ""; last_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); fileId_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } if (!hasClientName()) { memoizedIsInitialized = 0; return false; } if (hasLast()) { if (!getLast().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getClientNameBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeMessage(3, last_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeUInt64(4, fileId_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(2, getClientNameBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(3, last_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(4, fileId_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && (hasClientName() == other.hasClientName()); if (hasClientName()) { result = result && getClientName() .equals(other.getClientName()); } result = result && (hasLast() == other.hasLast()); if (hasLast()) { result = result && getLast() .equals(other.getLast()); } result = result && (hasFileId() == other.hasFileId()); if (hasFileId()) { result = result && (getFileId() == other.getFileId()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if 
(hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } if (hasClientName()) { hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER; hash = (53 * hash) + getClientName().hashCode(); } if (hasLast()) { hash = (37 * hash) + LAST_FIELD_NUMBER; hash = (53 * hash) + getLast().hashCode(); } if (hasFileId()) { hash = (37 * hash) + FILEID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getFileId()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { 
return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.CompleteRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CompleteRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CompleteRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getLastFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); clientName_ = ""; bitField0_ = (bitField0_ & ~0x00000002); if (lastBuilder_ == null) { last_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); } else { lastBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); fileId_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CompleteRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto buildPartial() { 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.clientName_ = clientName_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } if (lastBuilder_ == null) { result.last_ = last_; } else { result.last_ = lastBuilder_.build(); } if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.fileId_ = fileId_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } if (other.hasClientName()) { bitField0_ |= 0x00000002; clientName_ = other.clientName_; onChanged(); } if (other.hasLast()) { mergeLast(other.getLast()); } if (other.hasFileId()) { setFileId(other.getFileId()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } if (!hasClientName()) { return false; } if (hasLast()) { if (!getLast().isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = 
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // required string clientName = 2; private java.lang.Object clientName_ = ""; /** * required string clientName = 2; */ public boolean hasClientName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string clientName = 2; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); clientName_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string clientName = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string clientName = 2; */ public Builder setClientName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; clientName_ = value; onChanged(); return this; } /** * required string clientName = 2; */ public Builder clearClientName() { bitField0_ = (bitField0_ & ~0x00000002); clientName_ = getDefaultInstance().getClientName(); onChanged(); return this; } /** * required string clientName = 2; */ public Builder setClientNameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; clientName_ = value; onChanged(); return this; } // optional .hadoop.hdfs.ExtendedBlockProto last = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto last_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> lastBuilder_; /** * optional .hadoop.hdfs.ExtendedBlockProto last = 3; */ public boolean hasLast() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional .hadoop.hdfs.ExtendedBlockProto last = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getLast() { if (lastBuilder_ == null) { return last_; } else { return lastBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.ExtendedBlockProto last = 3; */ public 
Builder setLast(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { if (lastBuilder_ == null) { if (value == null) { throw new NullPointerException(); } last_ = value; onChanged(); } else { lastBuilder_.setMessage(value); } bitField0_ |= 0x00000004; return this; } /** * optional .hadoop.hdfs.ExtendedBlockProto last = 3; */ public Builder setLast( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) { if (lastBuilder_ == null) { last_ = builderForValue.build(); onChanged(); } else { lastBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000004; return this; } /** * optional .hadoop.hdfs.ExtendedBlockProto last = 3; */ public Builder mergeLast(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { if (lastBuilder_ == null) { if (((bitField0_ & 0x00000004) == 0x00000004) && last_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) { last_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(last_).mergeFrom(value).buildPartial(); } else { last_ = value; } onChanged(); } else { lastBuilder_.mergeFrom(value); } bitField0_ |= 0x00000004; return this; } /** * optional .hadoop.hdfs.ExtendedBlockProto last = 3; */ public Builder clearLast() { if (lastBuilder_ == null) { last_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); onChanged(); } else { lastBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); return this; } /** * optional .hadoop.hdfs.ExtendedBlockProto last = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getLastBuilder() { bitField0_ |= 0x00000004; onChanged(); return getLastFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.ExtendedBlockProto last = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getLastOrBuilder() { if (lastBuilder_ != null) { return lastBuilder_.getMessageOrBuilder(); } else { return last_; } } /** * optional .hadoop.hdfs.ExtendedBlockProto last = 3; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> getLastFieldBuilder() { if (lastBuilder_ == null) { lastBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>( last_, getParentForChildren(), isClean()); last_ = null; } return lastBuilder_; } // optional uint64 fileId = 4 [default = 0]; private long fileId_ ; /** * optional uint64 fileId = 4 [default = 0]; * *
       * <pre>
       * default to GRANDFATHER_INODE_ID
       * </pre>
*/ public boolean hasFileId() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional uint64 fileId = 4 [default = 0]; * *
       * <pre>
       * default to GRANDFATHER_INODE_ID
       * </pre>
*/ public long getFileId() { return fileId_; } /** * optional uint64 fileId = 4 [default = 0]; * *
       * <pre>
       * default to GRANDFATHER_INODE_ID
       * </pre>
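       *
       * Illustrative sketch, not part of the protoc output; the path, client
       * name and file id below are made-up values:
       * <pre>
       * CompleteRequestProto req = CompleteRequestProto.newBuilder()
       *     .setSrc("/tmp/example.txt")
       *     .setClientName("DFSClient_example")
       *     .setFileId(16386L)
       *     .build();
       * </pre>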
*/ public Builder setFileId(long value) { bitField0_ |= 0x00000008; fileId_ = value; onChanged(); return this; } /** * optional uint64 fileId = 4 [default = 0]; * *
       * <pre>
       * default to GRANDFATHER_INODE_ID
       * </pre>
*/ public Builder clearFileId() { bitField0_ = (bitField0_ & ~0x00000008); fileId_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CompleteRequestProto) } static { defaultInstance = new CompleteRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.CompleteRequestProto) } public interface CompleteResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required bool result = 1; /** * required bool result = 1; */ boolean hasResult(); /** * required bool result = 1; */ boolean getResult(); } /** * Protobuf type {@code hadoop.hdfs.CompleteResponseProto} */ public static final class CompleteResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements CompleteResponseProtoOrBuilder { // Use CompleteResponseProto.newBuilder() to construct. private CompleteResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private CompleteResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final CompleteResponseProto defaultInstance; public static CompleteResponseProto getDefaultInstance() { return defaultInstance; } public CompleteResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CompleteResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; result_ = input.readBool(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CompleteResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CompleteResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public CompleteResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new CompleteResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required bool result = 1; public static final int RESULT_FIELD_NUMBER = 1; private boolean result_; /** * required bool result = 1; */ public boolean hasResult() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool result = 1; */ public boolean getResult() { return result_; } private void initFields() { result_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasResult()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBool(1, result_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(1, result_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto) obj; boolean result = true; result = result && (hasResult() == other.hasResult()); if (hasResult()) { result = result && (getResult() == other.getResult()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasResult()) { hash = (37 * hash) + RESULT_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getResult()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto parseFrom( 
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.CompleteResponseProto} */ public static final class 
Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CompleteResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CompleteResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); result_ = false; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CompleteResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.result_ = result_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto other) { if (other == 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto.getDefaultInstance()) return this; if (other.hasResult()) { setResult(other.getResult()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasResult()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required bool result = 1; private boolean result_ ; /** * required bool result = 1; */ public boolean hasResult() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool result = 1; */ public boolean getResult() { return result_; } /** * required bool result = 1; */ public Builder setResult(boolean value) { bitField0_ |= 0x00000001; result_ = value; onChanged(); return this; } /** * required bool result = 1; */ public Builder clearResult() { bitField0_ = (bitField0_ & ~0x00000001); result_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CompleteResponseProto) } static { defaultInstance = new CompleteResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.CompleteResponseProto) } public interface ReportBadBlocksRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ java.util.List getBlocksList(); /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index); /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ int getBlocksCount(); /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ java.util.List getBlocksOrBuilderList(); /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder( int index); } /** * Protobuf type {@code hadoop.hdfs.ReportBadBlocksRequestProto} */ public static final class ReportBadBlocksRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements ReportBadBlocksRequestProtoOrBuilder { // Use ReportBadBlocksRequestProto.newBuilder() to construct. 
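  // -------------------------------------------------------------------
  // Illustrative sketch, not part of the protoc output: one way a caller
  // might assemble this request and round-trip it through the wire format.
  // The LocatedBlockProto argument is left to the caller because its
  // required sub-messages (extended block, datanode infos, block token)
  // depend on the HdfsProtos definitions in use.
  private static ReportBadBlocksRequestProto exampleRoundTrip(
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto badBlock)
      throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
    ReportBadBlocksRequestProto request = ReportBadBlocksRequestProto.newBuilder()
        .addBlocks(badBlock)    // repeated field; call once per corrupt replica
        .build();               // build() fails fast if a nested required field is unset
    byte[] wire = request.toByteArray();                // serialize to protobuf wire format
    return ReportBadBlocksRequestProto.parseFrom(wire); // parse back via the static PARSER
  }
  // -------------------------------------------------------------------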
private ReportBadBlocksRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ReportBadBlocksRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ReportBadBlocksRequestProto defaultInstance; public static ReportBadBlocksRequestProto getDefaultInstance() { return defaultInstance; } public ReportBadBlocksRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ReportBadBlocksRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { blocks_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } blocks_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.PARSER, extensionRegistry)); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { blocks_ = java.util.Collections.unmodifiableList(blocks_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ReportBadBlocksRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ReportBadBlocksRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public ReportBadBlocksRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new ReportBadBlocksRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } // repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; public static final int BLOCKS_FIELD_NUMBER = 1; private java.util.List blocks_; /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public java.util.List getBlocksList() { return blocks_; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public java.util.List getBlocksOrBuilderList() { return blocks_; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public int getBlocksCount() { return blocks_.size(); } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) { return blocks_.get(index); } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder( int index) { return blocks_.get(index); } private void initFields() { blocks_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; for (int i = 0; i < getBlocksCount(); i++) { if (!getBlocks(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); for (int i = 0; i < blocks_.size(); i++) { output.writeMessage(1, blocks_.get(i)); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; for (int i = 0; i < blocks_.size(); i++) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, blocks_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto) obj; boolean result = true; result = result && getBlocksList() .equals(other.getBlocksList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (getBlocksCount() > 0) { hash = (37 * hash) + BLOCKS_FIELD_NUMBER; hash = (53 * hash) + getBlocksList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( 
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ReportBadBlocksRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ReportBadBlocksRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ReportBadBlocksRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getBlocksFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (blocksBuilder_ == null) { blocks_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { blocksBuilder_.clear(); } return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ReportBadBlocksRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto(this); int from_bitField0_ = bitField0_; if (blocksBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { blocks_ = java.util.Collections.unmodifiableList(blocks_); bitField0_ = (bitField0_ & ~0x00000001); } result.blocks_ = blocks_; } else { result.blocks_ = blocksBuilder_.build(); } 
onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance()) return this; if (blocksBuilder_ == null) { if (!other.blocks_.isEmpty()) { if (blocks_.isEmpty()) { blocks_ = other.blocks_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureBlocksIsMutable(); blocks_.addAll(other.blocks_); } onChanged(); } } else { if (!other.blocks_.isEmpty()) { if (blocksBuilder_.isEmpty()) { blocksBuilder_.dispose(); blocksBuilder_ = null; blocks_ = other.blocks_; bitField0_ = (bitField0_ & ~0x00000001); blocksBuilder_ = io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getBlocksFieldBuilder() : null; } else { blocksBuilder_.addAllMessages(other.blocks_); } } } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { for (int i = 0; i < getBlocksCount(); i++) { if (!getBlocks(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; private java.util.List blocks_ = java.util.Collections.emptyList(); private void ensureBlocksIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { blocks_ = new java.util.ArrayList(blocks_); bitField0_ |= 0x00000001; } } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blocksBuilder_; /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public java.util.List getBlocksList() { if (blocksBuilder_ == null) { return java.util.Collections.unmodifiableList(blocks_); } else { return blocksBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public int getBlocksCount() { if (blocksBuilder_ == null) { return blocks_.size(); } else { return blocksBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) { if (blocksBuilder_ == null) { return blocks_.get(index); } else { return 
blocksBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public Builder setBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (blocksBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBlocksIsMutable(); blocks_.set(index, value); onChanged(); } else { blocksBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public Builder setBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.set(index, builderForValue.build()); onChanged(); } else { blocksBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (blocksBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBlocksIsMutable(); blocks_.add(value); onChanged(); } else { blocksBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public Builder addBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (blocksBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureBlocksIsMutable(); blocks_.add(index, value); onChanged(); } else { blocksBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public Builder addBlocks( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.add(builderForValue.build()); onChanged(); } else { blocksBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public Builder addBlocks( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.add(index, builderForValue.build()); onChanged(); } else { blocksBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public Builder addAllBlocks( java.lang.Iterable values) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); super.addAll(values, blocks_); onChanged(); } else { blocksBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public Builder clearBlocks() { if (blocksBuilder_ == null) { blocks_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { blocksBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public Builder removeBlocks(int index) { if (blocksBuilder_ == null) { ensureBlocksIsMutable(); blocks_.remove(index); onChanged(); } else { blocksBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlocksBuilder( int index) { return getBlocksFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder( int index) { if 
(blocksBuilder_ == null) { return blocks_.get(index); } else { return blocksBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public java.util.List getBlocksOrBuilderList() { if (blocksBuilder_ != null) { return blocksBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(blocks_); } } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder() { return getBlocksFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder( int index) { return getBlocksFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.LocatedBlockProto blocks = 1; */ public java.util.List getBlocksBuilderList() { return getBlocksFieldBuilder().getBuilderList(); } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> getBlocksFieldBuilder() { if (blocksBuilder_ == null) { blocksBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>( blocks_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); blocks_ = null; } return blocksBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ReportBadBlocksRequestProto) } static { defaultInstance = new ReportBadBlocksRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ReportBadBlocksRequestProto) } public interface ReportBadBlocksResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.ReportBadBlocksResponseProto} * *
   * <pre>
   * void response
   * </pre>
*/ public static final class ReportBadBlocksResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements ReportBadBlocksResponseProtoOrBuilder { // Use ReportBadBlocksResponseProto.newBuilder() to construct. private ReportBadBlocksResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ReportBadBlocksResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ReportBadBlocksResponseProto defaultInstance; public static ReportBadBlocksResponseProto getDefaultInstance() { return defaultInstance; } public ReportBadBlocksResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ReportBadBlocksResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ReportBadBlocksResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ReportBadBlocksResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public ReportBadBlocksResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new ReportBadBlocksResponseProto(input, extensionRegistry); } }; @java.lang.Override public 
io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto parseFrom( java.io.InputStream input, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ReportBadBlocksResponseProto} * *
     * <pre>
     * void response
     * </pre>
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ReportBadBlocksResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ReportBadBlocksResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ReportBadBlocksResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance()) 
return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ReportBadBlocksResponseProto) } static { defaultInstance = new ReportBadBlocksResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ReportBadBlocksResponseProto) } public interface ConcatRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string trg = 1; /** * required string trg = 1; */ boolean hasTrg(); /** * required string trg = 1; */ java.lang.String getTrg(); /** * required string trg = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getTrgBytes(); // repeated string srcs = 2; /** * repeated string srcs = 2; */ java.util.List getSrcsList(); /** * repeated string srcs = 2; */ int getSrcsCount(); /** * repeated string srcs = 2; */ java.lang.String getSrcs(int index); /** * repeated string srcs = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcsBytes(int index); } /** * Protobuf type {@code hadoop.hdfs.ConcatRequestProto} */ public static final class ConcatRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements ConcatRequestProtoOrBuilder { // Use ConcatRequestProto.newBuilder() to construct. 
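  // Illustrative only, not part of the generated source: a minimal sketch of
  // constructing a ConcatRequestProto through the builder API declared below.
  // The paths are hypothetical placeholders; "trg" is the required concat
  // target and "srcs" are the files whose blocks are moved onto it.
  //
  //   ClientNamenodeProtocolProtos.ConcatRequestProto req =
  //       ClientNamenodeProtocolProtos.ConcatRequestProto.newBuilder()
  //           .setTrg("/data/target")          // required string trg = 1
  //           .addSrcs("/data/part-00000")     // repeated string srcs = 2
  //           .addSrcs("/data/part-00001")
  //           .build();                        // build() throws if trg is unset
  //
  //   byte[] wire = req.toByteArray();
  //   ClientNamenodeProtocolProtos.ConcatRequestProto parsed =
  //       ClientNamenodeProtocolProtos.ConcatRequestProto.parseFrom(wire);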
private ConcatRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ConcatRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ConcatRequestProto defaultInstance; public static ConcatRequestProto getDefaultInstance() { return defaultInstance; } public ConcatRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ConcatRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; trg_ = input.readBytes(); break; } case 18: { if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { srcs_ = new io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00000002; } srcs_.add(input.readBytes()); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { srcs_ = new io.prestosql.hadoop.$internal.com.google.protobuf.UnmodifiableLazyStringList(srcs_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ConcatRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ConcatRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public ConcatRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new ConcatRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string trg = 1; public static final int TRG_FIELD_NUMBER = 1; private java.lang.Object trg_; /** * required string trg = 1; */ public boolean hasTrg() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string trg = 1; */ public java.lang.String getTrg() { java.lang.Object ref = trg_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { trg_ = s; } return s; } } /** * required string trg = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getTrgBytes() { java.lang.Object ref = trg_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); trg_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // repeated string srcs = 2; public static final int SRCS_FIELD_NUMBER = 2; private io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringList srcs_; /** * repeated string srcs = 2; */ public java.util.List getSrcsList() { return srcs_; } /** * repeated string srcs = 2; */ public int getSrcsCount() { return srcs_.size(); } /** * repeated string srcs = 2; */ public java.lang.String getSrcs(int index) { return srcs_.get(index); } /** * repeated string srcs = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcsBytes(int index) { return srcs_.getByteString(index); } private void initFields() { trg_ = ""; srcs_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasTrg()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getTrgBytes()); } for (int i = 0; i < srcs_.size(); i++) { output.writeBytes(2, srcs_.getByteString(i)); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getTrgBytes()); } { int dataSize = 0; for (int i = 0; i < srcs_.size(); i++) { dataSize += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSizeNoTag(srcs_.getByteString(i)); } size += dataSize; size += 1 * getSrcsList().size(); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return 
super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto) obj; boolean result = true; result = result && (hasTrg() == other.hasTrg()); if (hasTrg()) { result = result && getTrg() .equals(other.getTrg()); } result = result && getSrcsList() .equals(other.getSrcsList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasTrg()) { hash = (37 * hash) + TRG_FIELD_NUMBER; hash = (53 * hash) + getTrg().hashCode(); } if (getSrcsCount() > 0) { hash = (37 * hash) + SRCS_FIELD_NUMBER; hash = (53 * hash) + getSrcsList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ConcatRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ConcatRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ConcatRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); trg_ = ""; bitField0_ = (bitField0_ & ~0x00000001); srcs_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ConcatRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto getDefaultInstanceForType() { return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.trg_ = trg_; if (((bitField0_ & 0x00000002) == 0x00000002)) { srcs_ = new io.prestosql.hadoop.$internal.com.google.protobuf.UnmodifiableLazyStringList( srcs_); bitField0_ = (bitField0_ & ~0x00000002); } result.srcs_ = srcs_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto.getDefaultInstance()) return this; if (other.hasTrg()) { bitField0_ |= 0x00000001; trg_ = other.trg_; onChanged(); } if (!other.srcs_.isEmpty()) { if (srcs_.isEmpty()) { srcs_ = other.srcs_; bitField0_ = (bitField0_ & ~0x00000002); } else { ensureSrcsIsMutable(); srcs_.addAll(other.srcs_); } onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasTrg()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string trg = 1; private java.lang.Object trg_ = ""; /** * required string trg = 1; */ public boolean hasTrg() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string trg = 1; */ public java.lang.String getTrg() { java.lang.Object ref = trg_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); trg_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string trg = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getTrgBytes() { 
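        // The field is held either as a java.lang.String or as a ByteString;
        // the first access in each direction converts and caches the result,
        // so repeated getTrg()/getTrgBytes() calls avoid re-encoding.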
java.lang.Object ref = trg_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); trg_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string trg = 1; */ public Builder setTrg( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; trg_ = value; onChanged(); return this; } /** * required string trg = 1; */ public Builder clearTrg() { bitField0_ = (bitField0_ & ~0x00000001); trg_ = getDefaultInstance().getTrg(); onChanged(); return this; } /** * required string trg = 1; */ public Builder setTrgBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; trg_ = value; onChanged(); return this; } // repeated string srcs = 2; private io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringList srcs_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY; private void ensureSrcsIsMutable() { if (!((bitField0_ & 0x00000002) == 0x00000002)) { srcs_ = new io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList(srcs_); bitField0_ |= 0x00000002; } } /** * repeated string srcs = 2; */ public java.util.List getSrcsList() { return java.util.Collections.unmodifiableList(srcs_); } /** * repeated string srcs = 2; */ public int getSrcsCount() { return srcs_.size(); } /** * repeated string srcs = 2; */ public java.lang.String getSrcs(int index) { return srcs_.get(index); } /** * repeated string srcs = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcsBytes(int index) { return srcs_.getByteString(index); } /** * repeated string srcs = 2; */ public Builder setSrcs( int index, java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureSrcsIsMutable(); srcs_.set(index, value); onChanged(); return this; } /** * repeated string srcs = 2; */ public Builder addSrcs( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureSrcsIsMutable(); srcs_.add(value); onChanged(); return this; } /** * repeated string srcs = 2; */ public Builder addAllSrcs( java.lang.Iterable values) { ensureSrcsIsMutable(); super.addAll(values, srcs_); onChanged(); return this; } /** * repeated string srcs = 2; */ public Builder clearSrcs() { srcs_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); onChanged(); return this; } /** * repeated string srcs = 2; */ public Builder addSrcsBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } ensureSrcsIsMutable(); srcs_.add(value); onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ConcatRequestProto) } static { defaultInstance = new ConcatRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ConcatRequestProto) } public interface ConcatResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.ConcatResponseProto} * *
   * <pre>
   * void response
   * </pre>
*/ public static final class ConcatResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements ConcatResponseProtoOrBuilder { // Use ConcatResponseProto.newBuilder() to construct. private ConcatResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ConcatResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ConcatResponseProto defaultInstance; public static ConcatResponseProto getDefaultInstance() { return defaultInstance; } public ConcatResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ConcatResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ConcatResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ConcatResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public ConcatResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new ConcatResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte 
memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ConcatResponseProto} * *
     * <pre>
     * void response
     * </pre>
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ConcatResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ConcatResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ConcatResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( 
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ConcatResponseProto) } static { defaultInstance = new ConcatResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ConcatResponseProto) } public interface TruncateRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; */ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); // required uint64 newLength = 2; /** * required uint64 newLength = 2; */ boolean hasNewLength(); /** * required uint64 newLength = 2; */ long getNewLength(); // required string clientName = 3; /** * required string clientName = 3; */ boolean hasClientName(); /** * required string clientName = 3; */ java.lang.String getClientName(); /** * required string clientName = 3; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes(); } /** * Protobuf type {@code hadoop.hdfs.TruncateRequestProto} */ public static final class TruncateRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements TruncateRequestProtoOrBuilder { // Use TruncateRequestProto.newBuilder() to construct. 
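  // Illustrative only, not part of the generated source: a minimal sketch of
  // building the request message behind the truncate RPC of this protocol.
  // The path, length, and client name below are hypothetical placeholders;
  // all three fields are declared required, so build() fails if any is unset.
  //
  //   ClientNamenodeProtocolProtos.TruncateRequestProto req =
  //       ClientNamenodeProtocolProtos.TruncateRequestProto.newBuilder()
  //           .setSrc("/data/file.log")        // required string src = 1
  //           .setNewLength(1024L)             // required uint64 newLength = 2
  //           .setClientName("DFSClient_1")    // required string clientName = 3
  //           .build();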
private TruncateRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private TruncateRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final TruncateRequestProto defaultInstance; public static TruncateRequestProto getDefaultInstance() { return defaultInstance; } public TruncateRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private TruncateRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } case 16: { bitField0_ |= 0x00000002; newLength_ = input.readUInt64(); break; } case 26: { bitField0_ |= 0x00000004; clientName_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_TruncateRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_TruncateRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public TruncateRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new TruncateRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private 
int bitField0_; // required string src = 1; public static final int SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required uint64 newLength = 2; public static final int NEWLENGTH_FIELD_NUMBER = 2; private long newLength_; /** * required uint64 newLength = 2; */ public boolean hasNewLength() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 newLength = 2; */ public long getNewLength() { return newLength_; } // required string clientName = 3; public static final int CLIENTNAME_FIELD_NUMBER = 3; private java.lang.Object clientName_; /** * required string clientName = 3; */ public boolean hasClientName() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string clientName = 3; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { clientName_ = s; } return s; } } /** * required string clientName = 3; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { src_ = ""; newLength_ = 0L; clientName_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } if (!hasNewLength()) { memoizedIsInitialized = 0; return false; } if (!hasClientName()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, newLength_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, getClientNameBytes()); } getUnknownFields().writeTo(output); } private int 
memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(2, newLength_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(3, getClientNameBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && (hasNewLength() == other.hasNewLength()); if (hasNewLength()) { result = result && (getNewLength() == other.getNewLength()); } result = result && (hasClientName() == other.hasClientName()); if (hasClientName()) { result = result && getClientName() .equals(other.getClientName()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } if (hasNewLength()) { hash = (37 * hash) + NEWLENGTH_FIELD_NUMBER; hash = (53 * hash) + hashLong(getNewLength()); } if (hasClientName()) { hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER; hash = (53 * hash) + getClientName().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
        org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto parseFrom(
        byte[] data,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto parseFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.TruncateRequestProto}
     */
    public static final class Builder extends
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProtoOrBuilder {
      public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_TruncateRequestProto_descriptor;
      }
      protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_TruncateRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto.class,
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); newLength_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); clientName_ = ""; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_TruncateRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.newLength_ = newLength_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.clientName_ = clientName_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } if (other.hasNewLength()) { setNewLength(other.getNewLength()); } if (other.hasClientName()) { bitField0_ |= 0x00000004; clientName_ = other.clientName_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } if (!hasNewLength()) { 
return false; } if (!hasClientName()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // required uint64 newLength = 2; private long newLength_ ; /** * required uint64 newLength = 2; */ public boolean hasNewLength() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 newLength = 2; */ public long getNewLength() { return newLength_; } /** * required uint64 newLength = 2; */ public Builder setNewLength(long value) { bitField0_ |= 0x00000002; newLength_ = value; onChanged(); return this; } /** * required uint64 newLength = 2; */ public Builder clearNewLength() { bitField0_ = (bitField0_ & ~0x00000002); newLength_ = 0L; onChanged(); return this; } // required string clientName = 3; private java.lang.Object clientName_ = ""; /** * required string clientName = 3; */ public boolean hasClientName() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string clientName = 3; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); clientName_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string 
clientName = 3; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string clientName = 3; */ public Builder setClientName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; clientName_ = value; onChanged(); return this; } /** * required string clientName = 3; */ public Builder clearClientName() { bitField0_ = (bitField0_ & ~0x00000004); clientName_ = getDefaultInstance().getClientName(); onChanged(); return this; } /** * required string clientName = 3; */ public Builder setClientNameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; clientName_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.TruncateRequestProto) } static { defaultInstance = new TruncateRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.TruncateRequestProto) } public interface TruncateResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required bool result = 1; /** * required bool result = 1; */ boolean hasResult(); /** * required bool result = 1; */ boolean getResult(); } /** * Protobuf type {@code hadoop.hdfs.TruncateResponseProto} */ public static final class TruncateResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements TruncateResponseProtoOrBuilder { // Use TruncateResponseProto.newBuilder() to construct. 
    private TruncateResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private TruncateResponseProto(boolean noInit) {
      this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
    }

    private static final TruncateResponseProto defaultInstance;
    public static TruncateResponseProto getDefaultInstance() {
      return defaultInstance;
    }

    public TruncateResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }

    private TruncateResponseProto(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 8: {
              bitField0_ |= 0x00000001;
              result_ = input.readBool();
              break;
            }
          }
        }
      } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_TruncateResponseProto_descriptor;
    }

    protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_TruncateResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto.Builder.class);
    }

    public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<TruncateResponseProto> PARSER =
        new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<TruncateResponseProto>() {
      public TruncateResponseProto parsePartialFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
        return new TruncateResponseProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<TruncateResponseProto> getParserForType() {
      return PARSER;
    }

    private int bitField0_;
    // required bool result = 1;
    public static final int RESULT_FIELD_NUMBER = 1;
    private boolean result_;
    /** required bool result
= 1; */ public boolean hasResult() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool result = 1; */ public boolean getResult() { return result_; } private void initFields() { result_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasResult()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBool(1, result_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(1, result_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto) obj; boolean result = true; result = result && (hasResult() == other.hasResult()); if (hasResult()) { result = result && (getResult() == other.getResult()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasResult()) { hash = (37 * hash) + RESULT_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getResult()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto parseFrom( byte[] data, 
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto parseFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.TruncateResponseProto}
     */
    public static final class Builder extends
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProtoOrBuilder {
      public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_TruncateResponseProto_descriptor;
      }
      protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_TruncateResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto.Builder.class);
      }

      // Construct using
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); result_ = false; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_TruncateResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.result_ = result_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto.getDefaultInstance()) return this; if (other.hasResult()) { setResult(other.getResult()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasResult()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto) e.getUnfinishedMessage(); throw e; } finally { 
if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required bool result = 1; private boolean result_ ; /** * required bool result = 1; */ public boolean hasResult() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool result = 1; */ public boolean getResult() { return result_; } /** * required bool result = 1; */ public Builder setResult(boolean value) { bitField0_ |= 0x00000001; result_ = value; onChanged(); return this; } /** * required bool result = 1; */ public Builder clearResult() { bitField0_ = (bitField0_ & ~0x00000001); result_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.TruncateResponseProto) } static { defaultInstance = new TruncateResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.TruncateResponseProto) } public interface RenameRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; */ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); // required string dst = 2; /** * required string dst = 2; */ boolean hasDst(); /** * required string dst = 2; */ java.lang.String getDst(); /** * required string dst = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getDstBytes(); } /** * Protobuf type {@code hadoop.hdfs.RenameRequestProto} */ public static final class RenameRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements RenameRequestProtoOrBuilder { // Use RenameRequestProto.newBuilder() to construct. 
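    // Usage sketch (not part of the generated file): RenameRequestProto carries
    // just the two required path strings. A hedged round-trip sketch; the paths
    // below are made-up examples.
    //
    //   RenameRequestProto rename = RenameRequestProto.newBuilder()
    //       .setSrc("/user/example/old-name")
    //       .setDst("/user/example/new-name")
    //       .build();
    //   // isInitialized() is true only once both required fields are set.
    //   RenameRequestProto copy =
    //       RenameRequestProto.parseFrom(rename.toByteArray());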
    private RenameRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private RenameRequestProto(boolean noInit) {
      this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
    }

    private static final RenameRequestProto defaultInstance;
    public static RenameRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public RenameRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }

    private RenameRequestProto(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              bitField0_ |= 0x00000001;
              src_ = input.readBytes();
              break;
            }
            case 18: {
              bitField0_ |= 0x00000002;
              dst_ = input.readBytes();
              break;
            }
          }
        }
      } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenameRequestProto_descriptor;
    }

    protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenameRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto.Builder.class);
    }

    public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<RenameRequestProto> PARSER =
        new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<RenameRequestProto>() {
      public RenameRequestProto parsePartialFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
        return new RenameRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<RenameRequestProto> getParserForType() {
      return PARSER;
    }

    private int bitField0_;
    // required string src = 1;
    public static final int SRC_FIELD_NUMBER = 1;
    private
java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required string dst = 2; public static final int DST_FIELD_NUMBER = 2; private java.lang.Object dst_; /** * required string dst = 2; */ public boolean hasDst() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string dst = 2; */ public java.lang.String getDst() { java.lang.Object ref = dst_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { dst_ = s; } return s; } } /** * required string dst = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getDstBytes() { java.lang.Object ref = dst_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); dst_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { src_ = ""; dst_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } if (!hasDst()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getDstBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(2, getDstBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return 
super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && (hasDst() == other.hasDst()); if (hasDst()) { result = result && getDst() .equals(other.getDst()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } if (hasDst()) { hash = (37 * hash) + DST_FIELD_NUMBER; hash = (53 * hash) + getDst().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto parseDelimitedFrom( java.io.InputStream input, 
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.RenameRequestProto}
     */
    public static final class Builder extends
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProtoOrBuilder {
      public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenameRequestProto_descriptor;
      }
      protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenameRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      private Builder(
          io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() { return new Builder(); }

      public Builder clear() {
        super.clear();
        src_ = "";
        bitField0_ = (bitField0_ & ~0x00000001);
        dst_ = "";
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenameRequestProto_descriptor;
      }

      public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto getDefaultInstanceForType() {
        return
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.dst_ = dst_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } if (other.hasDst()) { bitField0_ |= 0x00000002; dst_ = other.dst_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } if (!hasDst()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = 
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // required string dst = 2; private java.lang.Object dst_ = ""; /** * required string dst = 2; */ public boolean hasDst() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string dst = 2; */ public java.lang.String getDst() { java.lang.Object ref = dst_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); dst_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string dst = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getDstBytes() { java.lang.Object ref = dst_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); dst_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string dst = 2; */ public Builder setDst( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; dst_ = value; onChanged(); return this; } /** * required string dst = 2; */ public Builder clearDst() { bitField0_ = (bitField0_ & ~0x00000002); dst_ = getDefaultInstance().getDst(); onChanged(); return this; } /** * required string dst = 2; */ public Builder setDstBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; dst_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RenameRequestProto) } static { defaultInstance = new RenameRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RenameRequestProto) } public interface RenameResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required bool result = 1; /** * required bool result = 1; */ boolean hasResult(); /** * required bool result = 1; */ boolean getResult(); } /** * Protobuf type {@code hadoop.hdfs.RenameResponseProto} */ public static final class RenameResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements RenameResponseProtoOrBuilder { // Use RenameResponseProto.newBuilder() to construct. 
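    // Usage sketch (not part of the generated file): parseDelimitedFrom() reads
    // a length-prefixed message, the counterpart of writeDelimitedTo(), so that
    // several messages can share a single stream. A sketch using in-memory
    // streams as a stand-in for a real transport:
    //
    //   java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
    //   RenameResponseProto.newBuilder().setResult(true).build()
    //       .writeDelimitedTo(out);
    //   RenameResponseProto resp = RenameResponseProto.parseDelimitedFrom(
    //       new java.io.ByteArrayInputStream(out.toByteArray()));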
    private RenameResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private RenameResponseProto(boolean noInit) {
      this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
    }

    private static final RenameResponseProto defaultInstance;
    public static RenameResponseProto getDefaultInstance() {
      return defaultInstance;
    }

    public RenameResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }

    private RenameResponseProto(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 8: {
              bitField0_ |= 0x00000001;
              result_ = input.readBool();
              break;
            }
          }
        }
      } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenameResponseProto_descriptor;
    }

    protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenameResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.Builder.class);
    }

    public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<RenameResponseProto> PARSER =
        new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<RenameResponseProto>() {
      public RenameResponseProto parsePartialFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
        return new RenameResponseProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<RenameResponseProto> getParserForType() {
      return PARSER;
    }

    private int bitField0_;
    // required bool result = 1;
    public static final int RESULT_FIELD_NUMBER = 1;
    private boolean result_;
    /** required bool result = 1; */
    public boolean
hasResult() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool result = 1; */ public boolean getResult() { return result_; } private void initFields() { result_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasResult()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBool(1, result_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(1, result_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto) obj; boolean result = true; result = result && (hasResult() == other.hasResult()); if (hasResult()) { result = result && (getResult() == other.getResult()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasResult()) { hash = (37 * hash) + RESULT_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getResult()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto parseFrom( byte[] data, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RenameResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenameResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenameResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.Builder.class); } // Construct using 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); result_ = false; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenameResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.result_ = result_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.getDefaultInstance()) return this; if (other.hasResult()) { setResult(other.getResult()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasResult()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { 
mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required bool result = 1; private boolean result_ ; /** * required bool result = 1; */ public boolean hasResult() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool result = 1; */ public boolean getResult() { return result_; } /** * required bool result = 1; */ public Builder setResult(boolean value) { bitField0_ |= 0x00000001; result_ = value; onChanged(); return this; } /** * required bool result = 1; */ public Builder clearResult() { bitField0_ = (bitField0_ & ~0x00000001); result_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RenameResponseProto) } static { defaultInstance = new RenameResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RenameResponseProto) } public interface Rename2RequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; */ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); // required string dst = 2; /** * required string dst = 2; */ boolean hasDst(); /** * required string dst = 2; */ java.lang.String getDst(); /** * required string dst = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getDstBytes(); // required bool overwriteDest = 3; /** * required bool overwriteDest = 3; */ boolean hasOverwriteDest(); /** * required bool overwriteDest = 3; */ boolean getOverwriteDest(); // optional bool moveToTrash = 4; /** * optional bool moveToTrash = 4; */ boolean hasMoveToTrash(); /** * optional bool moveToTrash = 4; */ boolean getMoveToTrash(); } /** * Protobuf type {@code hadoop.hdfs.Rename2RequestProto} */ public static final class Rename2RequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements Rename2RequestProtoOrBuilder { // Use Rename2RequestProto.newBuilder() to construct. 
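Before the Rename2RequestProto implementation continues below, the wire shape of the two rename messages is easier to read in proto2 form than in the generated accessors. The following is a reconstruction from the field comments in this listing (required bool result = 1 on RenameResponseProto; the src, dst, overwriteDest, and moveToTrash members of Rename2RequestProtoOrBuilder), not a quote of the original .proto source:

// proto2 sketch reconstructed from the generated code in this listing.
// Field names, numbers, and labels come from the javadoc comments above;
// the package is implied by the {@code hadoop.hdfs.*} type names.
message RenameResponseProto {
  required bool result = 1;
}

message Rename2RequestProto {
  required string src = 1;
  required string dst = 2;
  required bool overwriteDest = 3;
  optional bool moveToTrash = 4;
}

On the Java side the request is assembled through the generated builder. A minimal sketch, assuming the shaded jar is on the classpath and using only methods that appear in this listing (newBuilder, the field setters, build); the paths are hypothetical:

import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;

public class Rename2RequestExample {
    public static Rename2RequestProto buildRequest() {
        return Rename2RequestProto.newBuilder()
            .setSrc("/user/alice/old")   // required string src = 1
            .setDst("/user/alice/new")   // required string dst = 2
            .setOverwriteDest(false)     // required bool overwriteDest = 3
            .setMoveToTrash(true)        // optional bool moveToTrash = 4
            .build();                    // throws if any required field is unset
    }
}

Note that build() delegates to buildPartial() and then checks isInitialized(), so leaving src, dst, or overwriteDest unset fails at build time rather than on the wire.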
private Rename2RequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private Rename2RequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final Rename2RequestProto defaultInstance; public static Rename2RequestProto getDefaultInstance() { return defaultInstance; } public Rename2RequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private Rename2RequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; dst_ = input.readBytes(); break; } case 24: { bitField0_ |= 0x00000004; overwriteDest_ = input.readBool(); break; } case 32: { bitField0_ |= 0x00000008; moveToTrash_ = input.readBool(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_Rename2RequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_Rename2RequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public Rename2RequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new Rename2RequestProto(input, extensionRegistry); } }; @java.lang.Override public 
io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string src = 1; public static final int SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required string dst = 2; public static final int DST_FIELD_NUMBER = 2; private java.lang.Object dst_; /** * required string dst = 2; */ public boolean hasDst() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string dst = 2; */ public java.lang.String getDst() { java.lang.Object ref = dst_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { dst_ = s; } return s; } } /** * required string dst = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getDstBytes() { java.lang.Object ref = dst_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); dst_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required bool overwriteDest = 3; public static final int OVERWRITEDEST_FIELD_NUMBER = 3; private boolean overwriteDest_; /** * required bool overwriteDest = 3; */ public boolean hasOverwriteDest() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required bool overwriteDest = 3; */ public boolean getOverwriteDest() { return overwriteDest_; } // optional bool moveToTrash = 4; public static final int MOVETOTRASH_FIELD_NUMBER = 4; private boolean moveToTrash_; /** * optional bool moveToTrash = 4; */ public boolean hasMoveToTrash() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional bool moveToTrash = 4; */ public boolean getMoveToTrash() { return moveToTrash_; } private void initFields() { src_ = ""; dst_ = ""; overwriteDest_ = false; moveToTrash_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } if (!hasDst()) { memoizedIsInitialized = 0; return false; } if (!hasOverwriteDest()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void 
writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getDstBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBool(3, overwriteDest_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeBool(4, moveToTrash_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(2, getDstBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(3, overwriteDest_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(4, moveToTrash_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && (hasDst() == other.hasDst()); if (hasDst()) { result = result && getDst() .equals(other.getDst()); } result = result && (hasOverwriteDest() == other.hasOverwriteDest()); if (hasOverwriteDest()) { result = result && (getOverwriteDest() == other.getOverwriteDest()); } result = result && (hasMoveToTrash() == other.hasMoveToTrash()); if (hasMoveToTrash()) { result = result && (getMoveToTrash() == other.getMoveToTrash()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } if (hasDst()) { hash = (37 * hash) + DST_FIELD_NUMBER; hash = (53 * hash) + getDst().hashCode(); } if (hasOverwriteDest()) { hash = (37 * hash) + OVERWRITEDEST_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getOverwriteDest()); } if (hasMoveToTrash()) { hash = (37 * hash) + MOVETOTRASH_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getMoveToTrash()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf 
type {@code hadoop.hdfs.Rename2RequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_Rename2RequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_Rename2RequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); dst_ = ""; bitField0_ = (bitField0_ & ~0x00000002); overwriteDest_ = false; bitField0_ = (bitField0_ & ~0x00000004); moveToTrash_ = false; bitField0_ = (bitField0_ & ~0x00000008); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_Rename2RequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.dst_ = dst_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.overwriteDest_ = overwriteDest_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.moveToTrash_ = moveToTrash_; 
result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } if (other.hasDst()) { bitField0_ |= 0x00000002; dst_ = other.dst_; onChanged(); } if (other.hasOverwriteDest()) { setOverwriteDest(other.getOverwriteDest()); } if (other.hasMoveToTrash()) { setMoveToTrash(other.getMoveToTrash()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } if (!hasDst()) { return false; } if (!hasOverwriteDest()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // 
required string dst = 2; private java.lang.Object dst_ = ""; /** * required string dst = 2; */ public boolean hasDst() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string dst = 2; */ public java.lang.String getDst() { java.lang.Object ref = dst_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); dst_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string dst = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getDstBytes() { java.lang.Object ref = dst_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); dst_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string dst = 2; */ public Builder setDst( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; dst_ = value; onChanged(); return this; } /** * required string dst = 2; */ public Builder clearDst() { bitField0_ = (bitField0_ & ~0x00000002); dst_ = getDefaultInstance().getDst(); onChanged(); return this; } /** * required string dst = 2; */ public Builder setDstBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; dst_ = value; onChanged(); return this; } // required bool overwriteDest = 3; private boolean overwriteDest_ ; /** * required bool overwriteDest = 3; */ public boolean hasOverwriteDest() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required bool overwriteDest = 3; */ public boolean getOverwriteDest() { return overwriteDest_; } /** * required bool overwriteDest = 3; */ public Builder setOverwriteDest(boolean value) { bitField0_ |= 0x00000004; overwriteDest_ = value; onChanged(); return this; } /** * required bool overwriteDest = 3; */ public Builder clearOverwriteDest() { bitField0_ = (bitField0_ & ~0x00000004); overwriteDest_ = false; onChanged(); return this; } // optional bool moveToTrash = 4; private boolean moveToTrash_ ; /** * optional bool moveToTrash = 4; */ public boolean hasMoveToTrash() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional bool moveToTrash = 4; */ public boolean getMoveToTrash() { return moveToTrash_; } /** * optional bool moveToTrash = 4; */ public Builder setMoveToTrash(boolean value) { bitField0_ |= 0x00000008; moveToTrash_ = value; onChanged(); return this; } /** * optional bool moveToTrash = 4; */ public Builder clearMoveToTrash() { bitField0_ = (bitField0_ & ~0x00000008); moveToTrash_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.Rename2RequestProto) } static { defaultInstance = new Rename2RequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.Rename2RequestProto) } public interface Rename2ResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.Rename2ResponseProto} * *
   * <pre>
   * void response
   * </pre>
*/ public static final class Rename2ResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements Rename2ResponseProtoOrBuilder { // Use Rename2ResponseProto.newBuilder() to construct. private Rename2ResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private Rename2ResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final Rename2ResponseProto defaultInstance; public static Rename2ResponseProto getDefaultInstance() { return defaultInstance; } public Rename2ResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private Rename2ResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_Rename2ResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_Rename2ResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public Rename2ResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new Rename2ResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte 
memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.Rename2ResponseProto} * *
     * <pre>
     * void response
     * </pre>
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_Rename2ResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_Rename2ResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_Rename2ResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder 
mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.Rename2ResponseProto) } static { defaultInstance = new Rename2ResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.Rename2ResponseProto) } public interface DeleteRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; */ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); // required bool recursive = 2; /** * required bool recursive = 2; */ boolean hasRecursive(); /** * required bool recursive = 2; */ boolean getRecursive(); } /** * Protobuf type {@code hadoop.hdfs.DeleteRequestProto} */ public static final class DeleteRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements DeleteRequestProtoOrBuilder { // Use DeleteRequestProto.newBuilder() to construct. private DeleteRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DeleteRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DeleteRequestProto defaultInstance; public static DeleteRequestProto getDefaultInstance() { return defaultInstance; } public DeleteRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DeleteRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } case 16: { bitField0_ |= 0x00000002; recursive_ = input.readBool(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw 
e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DeleteRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DeleteRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public DeleteRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new DeleteRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string src = 1; public static final int SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required bool recursive = 2; public static final int RECURSIVE_FIELD_NUMBER = 2; private boolean recursive_; /** * required bool recursive = 2; */ public boolean hasRecursive() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required bool recursive = 2; */ public boolean getRecursive() { return recursive_; } private void initFields() { src_ = ""; recursive_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } if (!hasRecursive()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void 
writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBool(2, recursive_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(2, recursive_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && (hasRecursive() == other.hasRecursive()); if (hasRecursive()) { result = result && (getRecursive() == other.getRecursive()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } if (hasRecursive()) { hash = (37 * hash) + RECURSIVE_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getRecursive()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto parseFrom( byte[] data, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.DeleteRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DeleteRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DeleteRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto.Builder.class); } // Construct using 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); recursive_ = false; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DeleteRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.recursive_ = recursive_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } if (other.hasRecursive()) { setRecursive(other.getRecursive()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } if (!hasRecursive()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, 
extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // required bool recursive = 2; private boolean recursive_ ; /** * required bool recursive = 2; */ public boolean hasRecursive() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required bool recursive = 2; */ public boolean getRecursive() { return recursive_; } /** * required bool recursive = 2; */ public Builder setRecursive(boolean value) { bitField0_ |= 0x00000002; recursive_ = value; onChanged(); return this; } /** * required bool recursive = 2; */ public Builder clearRecursive() { bitField0_ = (bitField0_ & ~0x00000002); recursive_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DeleteRequestProto) } static { defaultInstance = new DeleteRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DeleteRequestProto) } public interface DeleteResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required bool result = 1; /** * required bool result = 1; */ boolean hasResult(); /** * required bool result = 1; */ boolean getResult(); } /** * Protobuf type {@code hadoop.hdfs.DeleteResponseProto} */ public static final class DeleteResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements DeleteResponseProtoOrBuilder { // Use DeleteResponseProto.newBuilder() to construct. 
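    // ------------------------------------------------------------------
    // Editor's sketch (not part of the generated file): how a caller can
    // build and round-trip the delete request defined above. setSrc,
    // setRecursive, build and parseFrom are generated in this file;
    // toByteArray is inherited from the shaded protobuf runtime. The
    // path literal and variable names are illustrative only.
    //
    //   DeleteRequestProto req = DeleteRequestProto.newBuilder()
    //       .setSrc("/tmp/example")   // required string src = 1
    //       .setRecursive(true)       // required bool recursive = 2
    //       .build();                 // build() verifies required fields
    //   byte[] wire = req.toByteArray();
    //   DeleteRequestProto parsed = DeleteRequestProto.parseFrom(wire);
    //   // the NameNode replies with a DeleteResponseProto, whose
    //   // getResult() reports whether the delete succeeded
    // ------------------------------------------------------------------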
    private DeleteResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private DeleteResponseProto(boolean noInit) {
      this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
    }

    private static final DeleteResponseProto defaultInstance;
    public static DeleteResponseProto getDefaultInstance() {
      return defaultInstance;
    }

    public DeleteResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private DeleteResponseProto(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 8: {
              bitField0_ |= 0x00000001;
              result_ = input.readBool();
              break;
            }
          }
        }
      } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DeleteResponseProto_descriptor;
    }
    protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DeleteResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.class,
              org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.Builder.class);
    }

    public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<DeleteResponseProto> PARSER =
        new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<DeleteResponseProto>() {
      public DeleteResponseProto parsePartialFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
        return new DeleteResponseProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<DeleteResponseProto> getParserForType() {
      return PARSER;
    }

    private int bitField0_;
    // required bool result = 1;
    public static final int RESULT_FIELD_NUMBER = 1;
    private boolean result_;
    /**
     * required bool result = 1;
     */
    public boolean
hasResult() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool result = 1; */ public boolean getResult() { return result_; } private void initFields() { result_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasResult()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBool(1, result_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(1, result_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto) obj; boolean result = true; result = result && (hasResult() == other.hasResult()); if (hasResult()) { result = result && (getResult() == other.getResult()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasResult()) { hash = (37 * hash) + RESULT_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getResult()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto parseFrom( byte[] data, 
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto parseFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.DeleteResponseProto}
     */
    public static final class Builder extends
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProtoOrBuilder {
      public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DeleteResponseProto_descriptor;
      }
      protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DeleteResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.class,
                org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.Builder.class);
      }

      // Construct using
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); result_ = false; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DeleteResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.result_ = result_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.getDefaultInstance()) return this; if (other.hasResult()) { setResult(other.getResult()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasResult()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { 
mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required bool result = 1; private boolean result_ ; /** * required bool result = 1; */ public boolean hasResult() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool result = 1; */ public boolean getResult() { return result_; } /** * required bool result = 1; */ public Builder setResult(boolean value) { bitField0_ |= 0x00000001; result_ = value; onChanged(); return this; } /** * required bool result = 1; */ public Builder clearResult() { bitField0_ = (bitField0_ & ~0x00000001); result_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DeleteResponseProto) } static { defaultInstance = new DeleteResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DeleteResponseProto) } public interface MkdirsRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; */ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); // required .hadoop.hdfs.FsPermissionProto masked = 2; /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ boolean hasMasked(); /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getMasked(); /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getMaskedOrBuilder(); // required bool createParent = 3; /** * required bool createParent = 3; */ boolean hasCreateParent(); /** * required bool createParent = 3; */ boolean getCreateParent(); // optional .hadoop.hdfs.FsPermissionProto unmasked = 4; /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 4; */ boolean hasUnmasked(); /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 4; */ org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getUnmasked(); /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 4; */ org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getUnmaskedOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.MkdirsRequestProto} */ public static final class MkdirsRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements MkdirsRequestProtoOrBuilder { // Use MkdirsRequestProto.newBuilder() to construct. 
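    // ------------------------------------------------------------------
    // Editor's sketch (not part of the generated file): building a mkdirs
    // request. src, masked and createParent are required; unmasked
    // (field 4) is optional and may be omitted. FsPermissionProto lives
    // in AclProtos; the setPerm(...) call and the octal mode below are
    // assumptions about that message, which is not shown in this file.
    //
    //   MkdirsRequestProto req = MkdirsRequestProto.newBuilder()
    //       .setSrc("/user/example/dir")
    //       .setMasked(org.apache.hadoop.hdfs.protocol.proto.AclProtos
    //           .FsPermissionProto.newBuilder().setPerm(0755).build())
    //       .setCreateParent(true)
    //       .build();
    // ------------------------------------------------------------------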
private MkdirsRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private MkdirsRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final MkdirsRequestProto defaultInstance; public static MkdirsRequestProto getDefaultInstance() { return defaultInstance; } public MkdirsRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private MkdirsRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } case 18: { org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = masked_.toBuilder(); } masked_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(masked_); masked_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000002; break; } case 24: { bitField0_ |= 0x00000004; createParent_ = input.readBool(); break; } case 34: { org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder subBuilder = null; if (((bitField0_ & 0x00000008) == 0x00000008)) { subBuilder = unmasked_.toBuilder(); } unmasked_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(unmasked_); unmasked_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000008; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_MkdirsRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_MkdirsRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public MkdirsRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new MkdirsRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string src = 1; public static final int SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required .hadoop.hdfs.FsPermissionProto masked = 2; public static final int MASKED_FIELD_NUMBER = 2; private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto masked_; /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ public boolean hasMasked() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getMasked() { return masked_; } /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getMaskedOrBuilder() { return masked_; } // required bool createParent = 3; public static final int CREATEPARENT_FIELD_NUMBER = 3; private boolean createParent_; /** * required bool createParent = 3; */ public boolean hasCreateParent() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required bool createParent = 3; */ public boolean getCreateParent() { return createParent_; } // optional .hadoop.hdfs.FsPermissionProto unmasked = 4; public static final int UNMASKED_FIELD_NUMBER = 4; private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto unmasked_; /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 4; */ public boolean hasUnmasked() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 4; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getUnmasked() { return unmasked_; } /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 4; */ public 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getUnmaskedOrBuilder() { return unmasked_; } private void initFields() { src_ = ""; masked_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); createParent_ = false; unmasked_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } if (!hasMasked()) { memoizedIsInitialized = 0; return false; } if (!hasCreateParent()) { memoizedIsInitialized = 0; return false; } if (!getMasked().isInitialized()) { memoizedIsInitialized = 0; return false; } if (hasUnmasked()) { if (!getUnmasked().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, masked_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBool(3, createParent_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeMessage(4, unmasked_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(2, masked_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(3, createParent_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(4, unmasked_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && (hasMasked() == other.hasMasked()); if (hasMasked()) { result = result && getMasked() .equals(other.getMasked()); } result = result && (hasCreateParent() == other.hasCreateParent()); if (hasCreateParent()) { result = result && (getCreateParent() == other.getCreateParent()); } result = result && (hasUnmasked() == other.hasUnmasked()); if (hasUnmasked()) { result = result && 
getUnmasked() .equals(other.getUnmasked()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } if (hasMasked()) { hash = (37 * hash) + MASKED_FIELD_NUMBER; hash = (53 * hash) + getMasked().hashCode(); } if (hasCreateParent()) { hash = (37 * hash) + CREATEPARENT_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getCreateParent()); } if (hasUnmasked()) { hash = (37 * hash) + UNMASKED_FIELD_NUMBER; hash = (53 * hash) + getUnmasked().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.MkdirsRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_MkdirsRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_MkdirsRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getMaskedFieldBuilder(); getUnmaskedFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); if (maskedBuilder_ == null) { masked_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); } else { maskedBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); createParent_ = false; bitField0_ = (bitField0_ & ~0x00000004); if (unmaskedBuilder_ == null) { unmasked_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); } else { unmaskedBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_MkdirsRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto getDefaultInstanceForType() { 
return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } if (maskedBuilder_ == null) { result.masked_ = masked_; } else { result.masked_ = maskedBuilder_.build(); } if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.createParent_ = createParent_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } if (unmaskedBuilder_ == null) { result.unmasked_ = unmasked_; } else { result.unmasked_ = unmaskedBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } if (other.hasMasked()) { mergeMasked(other.getMasked()); } if (other.hasCreateParent()) { setCreateParent(other.getCreateParent()); } if (other.hasUnmasked()) { mergeUnmasked(other.getUnmasked()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } if (!hasMasked()) { return false; } if (!hasCreateParent()) { return false; } if (!getMasked().isInitialized()) { return false; } if (hasUnmasked()) { if (!getUnmasked().isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * 
required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // required .hadoop.hdfs.FsPermissionProto masked = 2; private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto masked_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> maskedBuilder_; /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ public boolean hasMasked() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getMasked() { if (maskedBuilder_ == null) { return masked_; } else { return maskedBuilder_.getMessage(); } } /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ public Builder setMasked(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) { if (maskedBuilder_ == null) { if (value == null) { throw new NullPointerException(); } masked_ = value; onChanged(); } else { maskedBuilder_.setMessage(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ public Builder setMasked( org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder builderForValue) { if (maskedBuilder_ == null) { masked_ = builderForValue.build(); onChanged(); } else { maskedBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ public Builder mergeMasked(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) { if (maskedBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002) && masked_ != org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance()) { masked_ = 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.newBuilder(masked_).mergeFrom(value).buildPartial(); } else { masked_ = value; } onChanged(); } else { maskedBuilder_.mergeFrom(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ public Builder clearMasked() { if (maskedBuilder_ == null) { masked_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); onChanged(); } else { maskedBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder getMaskedBuilder() { bitField0_ |= 0x00000002; onChanged(); return getMaskedFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getMaskedOrBuilder() { if (maskedBuilder_ != null) { return maskedBuilder_.getMessageOrBuilder(); } else { return masked_; } } /** * required .hadoop.hdfs.FsPermissionProto masked = 2; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> getMaskedFieldBuilder() { if (maskedBuilder_ == null) { maskedBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder>( masked_, getParentForChildren(), isClean()); masked_ = null; } return maskedBuilder_; } // required bool createParent = 3; private boolean createParent_ ; /** * required bool createParent = 3; */ public boolean hasCreateParent() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required bool createParent = 3; */ public boolean getCreateParent() { return createParent_; } /** * required bool createParent = 3; */ public Builder setCreateParent(boolean value) { bitField0_ |= 0x00000004; createParent_ = value; onChanged(); return this; } /** * required bool createParent = 3; */ public Builder clearCreateParent() { bitField0_ = (bitField0_ & ~0x00000004); createParent_ = false; onChanged(); return this; } // optional .hadoop.hdfs.FsPermissionProto unmasked = 4; private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto unmasked_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> unmaskedBuilder_; /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 4; */ public boolean hasUnmasked() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 4; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getUnmasked() { if (unmaskedBuilder_ == null) { return unmasked_; } else { return unmaskedBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 4; */ public Builder 
setUnmasked(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) { if (unmaskedBuilder_ == null) { if (value == null) { throw new NullPointerException(); } unmasked_ = value; onChanged(); } else { unmaskedBuilder_.setMessage(value); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 4; */ public Builder setUnmasked( org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder builderForValue) { if (unmaskedBuilder_ == null) { unmasked_ = builderForValue.build(); onChanged(); } else { unmaskedBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 4; */ public Builder mergeUnmasked(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) { if (unmaskedBuilder_ == null) { if (((bitField0_ & 0x00000008) == 0x00000008) && unmasked_ != org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance()) { unmasked_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.newBuilder(unmasked_).mergeFrom(value).buildPartial(); } else { unmasked_ = value; } onChanged(); } else { unmaskedBuilder_.mergeFrom(value); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 4; */ public Builder clearUnmasked() { if (unmaskedBuilder_ == null) { unmasked_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); onChanged(); } else { unmaskedBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); return this; } /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 4; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder getUnmaskedBuilder() { bitField0_ |= 0x00000008; onChanged(); return getUnmaskedFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 4; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getUnmaskedOrBuilder() { if (unmaskedBuilder_ != null) { return unmaskedBuilder_.getMessageOrBuilder(); } else { return unmasked_; } } /** * optional .hadoop.hdfs.FsPermissionProto unmasked = 4; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> getUnmaskedFieldBuilder() { if (unmaskedBuilder_ == null) { unmaskedBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder>( unmasked_, getParentForChildren(), isClean()); unmasked_ = null; } return unmaskedBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.MkdirsRequestProto) } static { defaultInstance = new MkdirsRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.MkdirsRequestProto) } public interface MkdirsResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required bool result = 1; /** * required bool result = 1; */ boolean hasResult(); /** * required bool result = 1; */ boolean getResult(); } /** * Protobuf type {@code hadoop.hdfs.MkdirsResponseProto} */ public 
static final class MkdirsResponseProto extends
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
        implements MkdirsResponseProtoOrBuilder {
      // Use MkdirsResponseProto.newBuilder() to construct.
      private MkdirsResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
        super(builder);
        this.unknownFields = builder.getUnknownFields();
      }
      private MkdirsResponseProto(boolean noInit) {
        this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
      }

      private static final MkdirsResponseProto defaultInstance;
      public static MkdirsResponseProto getDefaultInstance() {
        return defaultInstance;
      }

      public MkdirsResponseProto getDefaultInstanceForType() {
        return defaultInstance;
      }

      private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
      @java.lang.Override
      public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
          getUnknownFields() {
        return this.unknownFields;
      }
      private MkdirsResponseProto(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
        initFields();
        int mutable_bitField0_ = 0;
        io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
            io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!parseUnknownField(input, unknownFields,
                                       extensionRegistry, tag)) {
                  done = true;
                }
                break;
              }
              case 8: {
                bitField0_ |= 0x00000001;
                result_ = input.readBool();
                break;
              }
            }
          }
        } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(this);
        } catch (java.io.IOException e) {
          throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
              e.getMessage()).setUnfinishedMessage(this);
        } finally {
          this.unknownFields = unknownFields.build();
          makeExtensionsImmutable();
        }
      }
      public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_MkdirsResponseProto_descriptor;
      }
      protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_MkdirsResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.class,
                org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.Builder.class);
      }

      public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<MkdirsResponseProto> PARSER =
          new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<MkdirsResponseProto>() {
        public MkdirsResponseProto parsePartialFrom(
            io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
            io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
          return new MkdirsResponseProto(input, extensionRegistry);
        }
      };

      @java.lang.Override
      public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<MkdirsResponseProto>
getParserForType() { return PARSER; } private int bitField0_; // required bool result = 1; public static final int RESULT_FIELD_NUMBER = 1; private boolean result_; /** * required bool result = 1; */ public boolean hasResult() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool result = 1; */ public boolean getResult() { return result_; } private void initFields() { result_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasResult()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBool(1, result_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(1, result_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto) obj; boolean result = true; result = result && (hasResult() == other.hasResult()); if (hasResult()) { result = result && (getResult() == other.getResult()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasResult()) { hash = (37 * hash) + RESULT_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getResult()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { 
return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.MkdirsResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_MkdirsResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_MkdirsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); result_ = false; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_MkdirsResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.result_ = result_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.getDefaultInstance()) return this; if (other.hasResult()) { setResult(other.getResult()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasResult()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch 
(io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required bool result = 1; private boolean result_ ; /** * required bool result = 1; */ public boolean hasResult() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool result = 1; */ public boolean getResult() { return result_; } /** * required bool result = 1; */ public Builder setResult(boolean value) { bitField0_ |= 0x00000001; result_ = value; onChanged(); return this; } /** * required bool result = 1; */ public Builder clearResult() { bitField0_ = (bitField0_ & ~0x00000001); result_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.MkdirsResponseProto) } static { defaultInstance = new MkdirsResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.MkdirsResponseProto) } public interface GetListingRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; */ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); // required bytes startAfter = 2; /** * required bytes startAfter = 2; */ boolean hasStartAfter(); /** * required bytes startAfter = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getStartAfter(); // required bool needLocation = 3; /** * required bool needLocation = 3; */ boolean hasNeedLocation(); /** * required bool needLocation = 3; */ boolean getNeedLocation(); } /** * Protobuf type {@code hadoop.hdfs.GetListingRequestProto} */ public static final class GetListingRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetListingRequestProtoOrBuilder { // Use GetListingRequestProto.newBuilder() to construct. 
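// Editor's note (illustrative sketch, not part of the generated source): a
// minimal round trip for the MkdirsResponseProto message defined above. The
// variable names are hypothetical; the calls are the generated API shown in
// this file (newBuilder, setResult, build, parseFrom).
//
//   MkdirsResponseProto resp = MkdirsResponseProto.newBuilder()
//       .setResult(true)   // `result` is required; build() throws an
//       .build();          // uninitialized-message exception if it is unset
//   byte[] wire = resp.toByteArray();
//   MkdirsResponseProto parsed = MkdirsResponseProto.parseFrom(wire);
//   assert parsed.getResult();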
private GetListingRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetListingRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetListingRequestProto defaultInstance; public static GetListingRequestProto getDefaultInstance() { return defaultInstance; } public GetListingRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetListingRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; startAfter_ = input.readBytes(); break; } case 24: { bitField0_ |= 0x00000004; needLocation_ = input.readBool(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetListingRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetListingRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetListingRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetListingRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { 
return PARSER; } private int bitField0_; // required string src = 1; public static final int SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required bytes startAfter = 2; public static final int STARTAFTER_FIELD_NUMBER = 2; private io.prestosql.hadoop.$internal.com.google.protobuf.ByteString startAfter_; /** * required bytes startAfter = 2; */ public boolean hasStartAfter() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required bytes startAfter = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getStartAfter() { return startAfter_; } // required bool needLocation = 3; public static final int NEEDLOCATION_FIELD_NUMBER = 3; private boolean needLocation_; /** * required bool needLocation = 3; */ public boolean hasNeedLocation() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required bool needLocation = 3; */ public boolean getNeedLocation() { return needLocation_; } private void initFields() { src_ = ""; startAfter_ = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.EMPTY; needLocation_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } if (!hasStartAfter()) { memoizedIsInitialized = 0; return false; } if (!hasNeedLocation()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, startAfter_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBool(3, needLocation_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(2, startAfter_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(3, needLocation_); } 
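// Editor's note (descriptive, not generated): each present field contributes
// its encoded size here; e.g. the bool at field number 3 costs two bytes (one
// varint tag byte plus one payload byte). The total is memoized in
// memoizedSerializedSize, so repeated writeTo()/getSerializedSize() calls stay
// cheap.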
size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && (hasStartAfter() == other.hasStartAfter()); if (hasStartAfter()) { result = result && getStartAfter() .equals(other.getStartAfter()); } result = result && (hasNeedLocation() == other.hasNeedLocation()); if (hasNeedLocation()) { result = result && (getNeedLocation() == other.getNeedLocation()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } if (hasStartAfter()) { hash = (37 * hash) + STARTAFTER_FIELD_NUMBER; hash = (53 * hash) + getStartAfter().hashCode(); } if (hasNeedLocation()) { hash = (37 * hash) + NEEDLOCATION_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getNeedLocation()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto 
parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetListingRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetListingRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetListingRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } 
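// Editor's note (illustrative sketch, not generated): building a listing
// request with this Builder. All three fields are required, so isInitialized()
// stays false and build() fails until each is set. The path below is
// hypothetical; an empty startAfter asks for the first page of results.
//
//   GetListingRequestProto req = GetListingRequestProto.newBuilder()
//       .setSrc("/user/example")
//       .setStartAfter(io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.EMPTY)
//       .setNeedLocation(false)   // true would include block locations
//       .build();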
public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); startAfter_ = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); needLocation_ = false; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetListingRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.startAfter_ = startAfter_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.needLocation_ = needLocation_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } if (other.hasStartAfter()) { setStartAfter(other.getStartAfter()); } if (other.hasNeedLocation()) { setNeedLocation(other.getNeedLocation()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } if (!hasStartAfter()) { return false; } if (!hasNeedLocation()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { 
parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // required bytes startAfter = 2; private io.prestosql.hadoop.$internal.com.google.protobuf.ByteString startAfter_ = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.EMPTY; /** * required bytes startAfter = 2; */ public boolean hasStartAfter() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required bytes startAfter = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getStartAfter() { return startAfter_; } /** * required bytes startAfter = 2; */ public Builder setStartAfter(io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; startAfter_ = value; onChanged(); return this; } /** * required bytes startAfter = 2; */ public Builder clearStartAfter() { bitField0_ = (bitField0_ & ~0x00000002); startAfter_ = getDefaultInstance().getStartAfter(); onChanged(); return this; } // required bool needLocation = 3; private boolean needLocation_ ; /** * required bool needLocation = 3; */ public boolean hasNeedLocation() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required bool needLocation = 3; */ public boolean getNeedLocation() { return needLocation_; } /** * required bool needLocation = 3; */ public Builder setNeedLocation(boolean value) { bitField0_ |= 0x00000004; needLocation_ = value; onChanged(); return this; } /** * required bool needLocation = 3; */ public Builder clearNeedLocation() { bitField0_ = (bitField0_ & ~0x00000004); needLocation_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetListingRequestProto) } static { defaultInstance = new GetListingRequestProto(true); defaultInstance.initFields(); } // 
@@protoc_insertion_point(class_scope:hadoop.hdfs.GetListingRequestProto) } public interface GetListingResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // optional .hadoop.hdfs.DirectoryListingProto dirList = 1; /** * optional .hadoop.hdfs.DirectoryListingProto dirList = 1; */ boolean hasDirList(); /** * optional .hadoop.hdfs.DirectoryListingProto dirList = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto getDirList(); /** * optional .hadoop.hdfs.DirectoryListingProto dirList = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProtoOrBuilder getDirListOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.GetListingResponseProto} */ public static final class GetListingResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetListingResponseProtoOrBuilder { // Use GetListingResponseProto.newBuilder() to construct. private GetListingResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetListingResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetListingResponseProto defaultInstance; public static GetListingResponseProto getDefaultInstance() { return defaultInstance; } public GetListingResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetListingResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = dirList_.toBuilder(); } dirList_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(dirList_); dirList_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetListingResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetListingResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetListingResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetListingResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // optional .hadoop.hdfs.DirectoryListingProto dirList = 1; public static final int DIRLIST_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto dirList_; /** * optional .hadoop.hdfs.DirectoryListingProto dirList = 1; */ public boolean hasDirList() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .hadoop.hdfs.DirectoryListingProto dirList = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto getDirList() { return dirList_; } /** * optional .hadoop.hdfs.DirectoryListingProto dirList = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProtoOrBuilder getDirListOrBuilder() { return dirList_; } private void initFields() { dirList_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (hasDirList()) { if (!getDirList().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, dirList_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, dirList_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto) obj; boolean result = true; result = result && (hasDirList() == other.hasDirList()); if (hasDirList()) { result = result && getDirList() .equals(other.getDirList()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasDirList()) { hash = (37 * hash) + DIRLIST_FIELD_NUMBER; hash = (53 * hash) + getDirList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto parseFrom( 
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetListingResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetListingResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetListingResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getDirListFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (dirListBuilder_ == null) { dirList_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance(); } else { dirListBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetListingResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.getDefaultInstance(); } public 
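// Editor's note (illustrative sketch, not generated): `dirList` is optional,
// so consumers should test presence before reading it. `wireBytes` below is a
// hypothetical serialized response.
//
//   GetListingResponseProto resp = GetListingResponseProto.parseFrom(wireBytes);
//   if (resp.hasDirList()) {
//     org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto listing =
//         resp.getDirList();
//     // iterate the listing entries here
//   }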
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (dirListBuilder_ == null) { result.dirList_ = dirList_; } else { result.dirList_ = dirListBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.getDefaultInstance()) return this; if (other.hasDirList()) { mergeDirList(other.getDirList()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (hasDirList()) { if (!getDirList().isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // optional .hadoop.hdfs.DirectoryListingProto dirList = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto dirList_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProtoOrBuilder> dirListBuilder_; /** * optional .hadoop.hdfs.DirectoryListingProto dirList = 1; */ public boolean hasDirList() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .hadoop.hdfs.DirectoryListingProto dirList = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto getDirList() { if (dirListBuilder_ == null) { return dirList_; } else { return 
dirListBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.DirectoryListingProto dirList = 1; */ public Builder setDirList(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto value) { if (dirListBuilder_ == null) { if (value == null) { throw new NullPointerException(); } dirList_ = value; onChanged(); } else { dirListBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.DirectoryListingProto dirList = 1; */ public Builder setDirList( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder builderForValue) { if (dirListBuilder_ == null) { dirList_ = builderForValue.build(); onChanged(); } else { dirListBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.DirectoryListingProto dirList = 1; */ public Builder mergeDirList(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto value) { if (dirListBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && dirList_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance()) { dirList_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.newBuilder(dirList_).mergeFrom(value).buildPartial(); } else { dirList_ = value; } onChanged(); } else { dirListBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.DirectoryListingProto dirList = 1; */ public Builder clearDirList() { if (dirListBuilder_ == null) { dirList_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance(); onChanged(); } else { dirListBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * optional .hadoop.hdfs.DirectoryListingProto dirList = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder getDirListBuilder() { bitField0_ |= 0x00000001; onChanged(); return getDirListFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.DirectoryListingProto dirList = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProtoOrBuilder getDirListOrBuilder() { if (dirListBuilder_ != null) { return dirListBuilder_.getMessageOrBuilder(); } else { return dirList_; } } /** * optional .hadoop.hdfs.DirectoryListingProto dirList = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProtoOrBuilder> getDirListFieldBuilder() { if (dirListBuilder_ == null) { dirListBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProtoOrBuilder>( dirList_, getParentForChildren(), isClean()); dirList_ = null; } return dirListBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetListingResponseProto) } static { defaultInstance = new GetListingResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetListingResponseProto) } public interface GetSnapshottableDirListingRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * 
Protobuf type {@code hadoop.hdfs.GetSnapshottableDirListingRequestProto}
 *
 * <pre>
 * no input parameters
 * </pre>
*/ public static final class GetSnapshottableDirListingRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetSnapshottableDirListingRequestProtoOrBuilder { // Use GetSnapshottableDirListingRequestProto.newBuilder() to construct. private GetSnapshottableDirListingRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetSnapshottableDirListingRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetSnapshottableDirListingRequestProto defaultInstance; public static GetSnapshottableDirListingRequestProto getDefaultInstance() { return defaultInstance; } public GetSnapshottableDirListingRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetSnapshottableDirListingRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshottableDirListingRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshottableDirListingRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetSnapshottableDirListingRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetSnapshottableDirListingRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetSnapshottableDirListingRequestProto} * *
     * <pre>
     * no input parameters
     * </pre>
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshottableDirListingRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshottableDirListingRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshottableDirListingRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetSnapshottableDirListingRequestProto) } static { defaultInstance = new GetSnapshottableDirListingRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetSnapshottableDirListingRequestProto) } public interface GetSnapshottableDirListingResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // optional .hadoop.hdfs.SnapshottableDirectoryListingProto snapshottableDirList = 1; /** * optional .hadoop.hdfs.SnapshottableDirectoryListingProto snapshottableDirList = 1; */ boolean hasSnapshottableDirList(); /** * optional .hadoop.hdfs.SnapshottableDirectoryListingProto snapshottableDirList = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto getSnapshottableDirList(); /** * optional .hadoop.hdfs.SnapshottableDirectoryListingProto snapshottableDirList = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProtoOrBuilder getSnapshottableDirListOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.GetSnapshottableDirListingResponseProto} */ public static final class GetSnapshottableDirListingResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetSnapshottableDirListingResponseProtoOrBuilder { // Use GetSnapshottableDirListingResponseProto.newBuilder() to construct. 
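    // Editorial usage sketch (not emitted by protoc): typical builder
    // round-trip for this response message, kept in comment form so the
    // generated class is unchanged. setSnapshottableDirList and
    // parseFrom(byte[]) appear in the generated API below; the local names
    // (listing, resp, wire, parsed) are illustrative assumptions.
    //
    //   HdfsProtos.SnapshottableDirectoryListingProto listing =
    //       HdfsProtos.SnapshottableDirectoryListingProto.getDefaultInstance();
    //   GetSnapshottableDirListingResponseProto resp =
    //       GetSnapshottableDirListingResponseProto.newBuilder()
    //           .setSnapshottableDirList(listing)  // optional field, tag 1
    //           .build();
    //   byte[] wire = resp.toByteArray();
    //   GetSnapshottableDirListingResponseProto parsed =
    //       GetSnapshottableDirListingResponseProto.parseFrom(wire);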
private GetSnapshottableDirListingResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetSnapshottableDirListingResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetSnapshottableDirListingResponseProto defaultInstance; public static GetSnapshottableDirListingResponseProto getDefaultInstance() { return defaultInstance; } public GetSnapshottableDirListingResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetSnapshottableDirListingResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = snapshottableDirList_.toBuilder(); } snapshottableDirList_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(snapshottableDirList_); snapshottableDirList_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshottableDirListingResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshottableDirListingResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { 
public GetSnapshottableDirListingResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetSnapshottableDirListingResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // optional .hadoop.hdfs.SnapshottableDirectoryListingProto snapshottableDirList = 1; public static final int SNAPSHOTTABLEDIRLIST_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto snapshottableDirList_; /** * optional .hadoop.hdfs.SnapshottableDirectoryListingProto snapshottableDirList = 1; */ public boolean hasSnapshottableDirList() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .hadoop.hdfs.SnapshottableDirectoryListingProto snapshottableDirList = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto getSnapshottableDirList() { return snapshottableDirList_; } /** * optional .hadoop.hdfs.SnapshottableDirectoryListingProto snapshottableDirList = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProtoOrBuilder getSnapshottableDirListOrBuilder() { return snapshottableDirList_; } private void initFields() { snapshottableDirList_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (hasSnapshottableDirList()) { if (!getSnapshottableDirList().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, snapshottableDirList_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, snapshottableDirList_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto) obj; boolean result = true; result = result && (hasSnapshottableDirList() == other.hasSnapshottableDirList()); if (hasSnapshottableDirList()) { result = result && 
getSnapshottableDirList() .equals(other.getSnapshottableDirList()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSnapshottableDirList()) { hash = (37 * hash) + SNAPSHOTTABLEDIRLIST_FIELD_NUMBER; hash = (53 * hash) + getSnapshottableDirList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto parseFrom( 
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetSnapshottableDirListingResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshottableDirListingResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshottableDirListingResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getSnapshottableDirListFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (snapshottableDirListBuilder_ == null) { snapshottableDirList_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.getDefaultInstance(); } else { snapshottableDirListBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshottableDirListingResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto.getDefaultInstance(); } public 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (snapshottableDirListBuilder_ == null) { result.snapshottableDirList_ = snapshottableDirList_; } else { result.snapshottableDirList_ = snapshottableDirListBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto.getDefaultInstance()) return this; if (other.hasSnapshottableDirList()) { mergeSnapshottableDirList(other.getSnapshottableDirList()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (hasSnapshottableDirList()) { if (!getSnapshottableDirList().isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // optional .hadoop.hdfs.SnapshottableDirectoryListingProto snapshottableDirList = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto snapshottableDirList_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.Builder, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProtoOrBuilder> snapshottableDirListBuilder_; /** * optional .hadoop.hdfs.SnapshottableDirectoryListingProto snapshottableDirList = 1; */ public boolean hasSnapshottableDirList() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .hadoop.hdfs.SnapshottableDirectoryListingProto snapshottableDirList = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto getSnapshottableDirList() { if (snapshottableDirListBuilder_ == null) { return snapshottableDirList_; } else { return snapshottableDirListBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.SnapshottableDirectoryListingProto snapshottableDirList = 1; */ public Builder setSnapshottableDirList(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto value) { if (snapshottableDirListBuilder_ == null) { if (value == null) { throw new NullPointerException(); } snapshottableDirList_ = value; onChanged(); } else { snapshottableDirListBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.SnapshottableDirectoryListingProto snapshottableDirList = 1; */ public Builder setSnapshottableDirList( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.Builder builderForValue) { if (snapshottableDirListBuilder_ == null) { snapshottableDirList_ = builderForValue.build(); onChanged(); } else { snapshottableDirListBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.SnapshottableDirectoryListingProto snapshottableDirList = 1; */ public Builder mergeSnapshottableDirList(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto value) { if (snapshottableDirListBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && snapshottableDirList_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.getDefaultInstance()) { snapshottableDirList_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.newBuilder(snapshottableDirList_).mergeFrom(value).buildPartial(); } else { snapshottableDirList_ = value; } onChanged(); } else { snapshottableDirListBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.SnapshottableDirectoryListingProto snapshottableDirList = 1; */ public Builder clearSnapshottableDirList() { if (snapshottableDirListBuilder_ == null) { snapshottableDirList_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.getDefaultInstance(); onChanged(); } else { snapshottableDirListBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * optional .hadoop.hdfs.SnapshottableDirectoryListingProto snapshottableDirList = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.Builder getSnapshottableDirListBuilder() { bitField0_ |= 0x00000001; onChanged(); return getSnapshottableDirListFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.SnapshottableDirectoryListingProto snapshottableDirList = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProtoOrBuilder getSnapshottableDirListOrBuilder() { if (snapshottableDirListBuilder_ != null) { return snapshottableDirListBuilder_.getMessageOrBuilder(); } else { return snapshottableDirList_; } } /** * optional 
.hadoop.hdfs.SnapshottableDirectoryListingProto snapshottableDirList = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProtoOrBuilder> getSnapshottableDirListFieldBuilder() { if (snapshottableDirListBuilder_ == null) { snapshottableDirListBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProtoOrBuilder>( snapshottableDirList_, getParentForChildren(), isClean()); snapshottableDirList_ = null; } return snapshottableDirListBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetSnapshottableDirListingResponseProto) } static { defaultInstance = new GetSnapshottableDirListingResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetSnapshottableDirListingResponseProto) } public interface GetSnapshotDiffReportRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string snapshotRoot = 1; /** * required string snapshotRoot = 1; */ boolean hasSnapshotRoot(); /** * required string snapshotRoot = 1; */ java.lang.String getSnapshotRoot(); /** * required string snapshotRoot = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotRootBytes(); // required string fromSnapshot = 2; /** * required string fromSnapshot = 2; */ boolean hasFromSnapshot(); /** * required string fromSnapshot = 2; */ java.lang.String getFromSnapshot(); /** * required string fromSnapshot = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getFromSnapshotBytes(); // required string toSnapshot = 3; /** * required string toSnapshot = 3; */ boolean hasToSnapshot(); /** * required string toSnapshot = 3; */ java.lang.String getToSnapshot(); /** * required string toSnapshot = 3; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getToSnapshotBytes(); } /** * Protobuf type {@code hadoop.hdfs.GetSnapshotDiffReportRequestProto} */ public static final class GetSnapshotDiffReportRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetSnapshotDiffReportRequestProtoOrBuilder { // Use GetSnapshotDiffReportRequestProto.newBuilder() to construct. 
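    // Editorial usage sketch (not emitted by protoc): this request carries
    // three required strings, so build() throws an
    // UninitializedMessageException until snapshotRoot, fromSnapshot and
    // toSnapshot are all set. The path and snapshot names are illustrative
    // assumptions; the corresponding setters are defined in the Builder below.
    //
    //   GetSnapshotDiffReportRequestProto req =
    //       GetSnapshotDiffReportRequestProto.newBuilder()
    //           .setSnapshotRoot("/data/warehouse")  // snapshottable directory
    //           .setFromSnapshot("s1")               // earlier snapshot name
    //           .setToSnapshot("s2")                 // later snapshot name
    //           .build();                            // validates required fields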
private GetSnapshotDiffReportRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetSnapshotDiffReportRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetSnapshotDiffReportRequestProto defaultInstance; public static GetSnapshotDiffReportRequestProto getDefaultInstance() { return defaultInstance; } public GetSnapshotDiffReportRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetSnapshotDiffReportRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; snapshotRoot_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; fromSnapshot_ = input.readBytes(); break; } case 26: { bitField0_ |= 0x00000004; toSnapshot_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshotDiffReportRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshotDiffReportRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetSnapshotDiffReportRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new 
GetSnapshotDiffReportRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string snapshotRoot = 1; public static final int SNAPSHOTROOT_FIELD_NUMBER = 1; private java.lang.Object snapshotRoot_; /** * required string snapshotRoot = 1; */ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string snapshotRoot = 1; */ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotRoot_ = s; } return s; } } /** * required string snapshotRoot = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required string fromSnapshot = 2; public static final int FROMSNAPSHOT_FIELD_NUMBER = 2; private java.lang.Object fromSnapshot_; /** * required string fromSnapshot = 2; */ public boolean hasFromSnapshot() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string fromSnapshot = 2; */ public java.lang.String getFromSnapshot() { java.lang.Object ref = fromSnapshot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { fromSnapshot_ = s; } return s; } } /** * required string fromSnapshot = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getFromSnapshotBytes() { java.lang.Object ref = fromSnapshot_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); fromSnapshot_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required string toSnapshot = 3; public static final int TOSNAPSHOT_FIELD_NUMBER = 3; private java.lang.Object toSnapshot_; /** * required string toSnapshot = 3; */ public boolean hasToSnapshot() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string toSnapshot = 3; */ public java.lang.String getToSnapshot() { java.lang.Object ref = toSnapshot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { toSnapshot_ = s; } return s; } } /** * required string toSnapshot = 3; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getToSnapshotBytes() { java.lang.Object ref = toSnapshot_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = 
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); toSnapshot_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { snapshotRoot_ = ""; fromSnapshot_ = ""; toSnapshot_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSnapshotRoot()) { memoizedIsInitialized = 0; return false; } if (!hasFromSnapshot()) { memoizedIsInitialized = 0; return false; } if (!hasToSnapshot()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSnapshotRootBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getFromSnapshotBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, getToSnapshotBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSnapshotRootBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(2, getFromSnapshotBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(3, getToSnapshotBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto) obj; boolean result = true; result = result && (hasSnapshotRoot() == other.hasSnapshotRoot()); if (hasSnapshotRoot()) { result = result && getSnapshotRoot() .equals(other.getSnapshotRoot()); } result = result && (hasFromSnapshot() == other.hasFromSnapshot()); if (hasFromSnapshot()) { result = result && getFromSnapshot() .equals(other.getFromSnapshot()); } result = result && (hasToSnapshot() == other.hasToSnapshot()); if (hasToSnapshot()) { result = result && getToSnapshot() .equals(other.getToSnapshot()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSnapshotRoot()) { hash = (37 * hash) + SNAPSHOTROOT_FIELD_NUMBER; hash = (53 * hash) + getSnapshotRoot().hashCode(); } if 
(hasFromSnapshot()) { hash = (37 * hash) + FROMSNAPSHOT_FIELD_NUMBER; hash = (53 * hash) + getFromSnapshot().hashCode(); } if (hasToSnapshot()) { hash = (37 * hash) + TOSNAPSHOT_FIELD_NUMBER; hash = (53 * hash) + getToSnapshot().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder 
newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetSnapshotDiffReportRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshotDiffReportRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshotDiffReportRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); snapshotRoot_ = ""; bitField0_ = (bitField0_ & ~0x00000001); fromSnapshot_ = ""; bitField0_ = (bitField0_ & ~0x00000002); toSnapshot_ = ""; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshotDiffReportRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto buildPartial() { 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.snapshotRoot_ = snapshotRoot_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.fromSnapshot_ = fromSnapshot_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.toSnapshot_ = toSnapshot_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto.getDefaultInstance()) return this; if (other.hasSnapshotRoot()) { bitField0_ |= 0x00000001; snapshotRoot_ = other.snapshotRoot_; onChanged(); } if (other.hasFromSnapshot()) { bitField0_ |= 0x00000002; fromSnapshot_ = other.fromSnapshot_; onChanged(); } if (other.hasToSnapshot()) { bitField0_ |= 0x00000004; toSnapshot_ = other.toSnapshot_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSnapshotRoot()) { return false; } if (!hasFromSnapshot()) { return false; } if (!hasToSnapshot()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string snapshotRoot = 1; private java.lang.Object snapshotRoot_ = ""; /** * required string snapshotRoot = 1; */ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string snapshotRoot = 1; */ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); snapshotRoot_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string snapshotRoot = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = 
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string snapshotRoot = 1; */ public Builder setSnapshotRoot( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotRoot_ = value; onChanged(); return this; } /** * required string snapshotRoot = 1; */ public Builder clearSnapshotRoot() { bitField0_ = (bitField0_ & ~0x00000001); snapshotRoot_ = getDefaultInstance().getSnapshotRoot(); onChanged(); return this; } /** * required string snapshotRoot = 1; */ public Builder setSnapshotRootBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotRoot_ = value; onChanged(); return this; } // required string fromSnapshot = 2; private java.lang.Object fromSnapshot_ = ""; /** * required string fromSnapshot = 2; */ public boolean hasFromSnapshot() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string fromSnapshot = 2; */ public java.lang.String getFromSnapshot() { java.lang.Object ref = fromSnapshot_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); fromSnapshot_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string fromSnapshot = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getFromSnapshotBytes() { java.lang.Object ref = fromSnapshot_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); fromSnapshot_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string fromSnapshot = 2; */ public Builder setFromSnapshot( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; fromSnapshot_ = value; onChanged(); return this; } /** * required string fromSnapshot = 2; */ public Builder clearFromSnapshot() { bitField0_ = (bitField0_ & ~0x00000002); fromSnapshot_ = getDefaultInstance().getFromSnapshot(); onChanged(); return this; } /** * required string fromSnapshot = 2; */ public Builder setFromSnapshotBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; fromSnapshot_ = value; onChanged(); return this; } // required string toSnapshot = 3; private java.lang.Object toSnapshot_ = ""; /** * required string toSnapshot = 3; */ public boolean hasToSnapshot() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string toSnapshot = 3; */ public java.lang.String getToSnapshot() { java.lang.Object ref = toSnapshot_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); toSnapshot_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string toSnapshot = 3; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getToSnapshotBytes() { java.lang.Object ref = toSnapshot_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = 
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); toSnapshot_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string toSnapshot = 3; */ public Builder setToSnapshot( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; toSnapshot_ = value; onChanged(); return this; } /** * required string toSnapshot = 3; */ public Builder clearToSnapshot() { bitField0_ = (bitField0_ & ~0x00000004); toSnapshot_ = getDefaultInstance().getToSnapshot(); onChanged(); return this; } /** * required string toSnapshot = 3; */ public Builder setToSnapshotBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; toSnapshot_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetSnapshotDiffReportRequestProto) } static { defaultInstance = new GetSnapshotDiffReportRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetSnapshotDiffReportRequestProto) } public interface GetSnapshotDiffReportResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.SnapshotDiffReportProto diffReport = 1; /** * required .hadoop.hdfs.SnapshotDiffReportProto diffReport = 1; */ boolean hasDiffReport(); /** * required .hadoop.hdfs.SnapshotDiffReportProto diffReport = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto getDiffReport(); /** * required .hadoop.hdfs.SnapshotDiffReportProto diffReport = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProtoOrBuilder getDiffReportOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.GetSnapshotDiffReportResponseProto} */ public static final class GetSnapshotDiffReportResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetSnapshotDiffReportResponseProtoOrBuilder { // Use GetSnapshotDiffReportResponseProto.newBuilder() to construct. 
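// A minimal usage sketch (illustrative, not part of the generated file):
// building the request message defined above and reading this response
// type. How the response is obtained over RPC is elided here.
//
//   GetSnapshotDiffReportRequestProto req =
//       GetSnapshotDiffReportRequestProto.newBuilder()
//           .setSnapshotRoot("/data")  // required: the snapshottable directory
//           .setFromSnapshot("s1")     // required: earlier snapshot name
//           .setToSnapshot("s2")       // required: later snapshot name
//           .build();                  // fails fast if a required field is missing
//
//   GetSnapshotDiffReportResponseProto resp = ...; // obtained over the RPC
//   if (resp.hasDiffReport()) {
//     org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto
//         report = resp.getDiffReport();           // required field
//   }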
private GetSnapshotDiffReportResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetSnapshotDiffReportResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetSnapshotDiffReportResponseProto defaultInstance; public static GetSnapshotDiffReportResponseProto getDefaultInstance() { return defaultInstance; } public GetSnapshotDiffReportResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetSnapshotDiffReportResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = diffReport_.toBuilder(); } diffReport_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(diffReport_); diffReport_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshotDiffReportResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshotDiffReportResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetSnapshotDiffReportResponseProto parsePartialFrom( 
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetSnapshotDiffReportResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.SnapshotDiffReportProto diffReport = 1; public static final int DIFFREPORT_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto diffReport_; /** * required .hadoop.hdfs.SnapshotDiffReportProto diffReport = 1; */ public boolean hasDiffReport() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.SnapshotDiffReportProto diffReport = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto getDiffReport() { return diffReport_; } /** * required .hadoop.hdfs.SnapshotDiffReportProto diffReport = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProtoOrBuilder getDiffReportOrBuilder() { return diffReport_; } private void initFields() { diffReport_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasDiffReport()) { memoizedIsInitialized = 0; return false; } if (!getDiffReport().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, diffReport_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, diffReport_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto) obj; boolean result = true; result = result && (hasDiffReport() == other.hasDiffReport()); if (hasDiffReport()) { result = result && getDiffReport() .equals(other.getDiffReport()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + 
getDescriptorForType().hashCode(); if (hasDiffReport()) { hash = (37 * hash) + DIFFREPORT_FIELD_NUMBER; hash = (53 * hash) + getDiffReport().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder 
newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetSnapshotDiffReportResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshotDiffReportResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshotDiffReportResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getDiffReportFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (diffReportBuilder_ == null) { diffReport_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.getDefaultInstance(); } else { diffReportBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshotDiffReportResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto buildPartial() { 
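// As in the request builder, buildPartial() snapshots the builder state
// without validation. The diffReport sub-message has two storage modes:
// the plain diffReport_ field when it was set directly, or a lazily
// created SingleFieldBuilder (diffReportBuilder_) when the caller asked
// for a nested builder via getDiffReportBuilder(); the branch below
// reads from whichever representation is active.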
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (diffReportBuilder_ == null) { result.diffReport_ = diffReport_; } else { result.diffReport_ = diffReportBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto.getDefaultInstance()) return this; if (other.hasDiffReport()) { mergeDiffReport(other.getDiffReport()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasDiffReport()) { return false; } if (!getDiffReport().isInitialized()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.SnapshotDiffReportProto diffReport = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto diffReport_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProtoOrBuilder> diffReportBuilder_; /** * required .hadoop.hdfs.SnapshotDiffReportProto diffReport = 1; */ public boolean hasDiffReport() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.SnapshotDiffReportProto diffReport = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto getDiffReport() { if (diffReportBuilder_ == null) { return diffReport_; } else { return diffReportBuilder_.getMessage(); } } /** * required .hadoop.hdfs.SnapshotDiffReportProto diffReport = 1; */ public Builder setDiffReport(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto value) { if (diffReportBuilder_ == null) { if (value 
== null) { throw new NullPointerException(); } diffReport_ = value; onChanged(); } else { diffReportBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.SnapshotDiffReportProto diffReport = 1; */ public Builder setDiffReport( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.Builder builderForValue) { if (diffReportBuilder_ == null) { diffReport_ = builderForValue.build(); onChanged(); } else { diffReportBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.SnapshotDiffReportProto diffReport = 1; */ public Builder mergeDiffReport(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto value) { if (diffReportBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && diffReport_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.getDefaultInstance()) { diffReport_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.newBuilder(diffReport_).mergeFrom(value).buildPartial(); } else { diffReport_ = value; } onChanged(); } else { diffReportBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.SnapshotDiffReportProto diffReport = 1; */ public Builder clearDiffReport() { if (diffReportBuilder_ == null) { diffReport_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.getDefaultInstance(); onChanged(); } else { diffReportBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.SnapshotDiffReportProto diffReport = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.Builder getDiffReportBuilder() { bitField0_ |= 0x00000001; onChanged(); return getDiffReportFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.SnapshotDiffReportProto diffReport = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProtoOrBuilder getDiffReportOrBuilder() { if (diffReportBuilder_ != null) { return diffReportBuilder_.getMessageOrBuilder(); } else { return diffReport_; } } /** * required .hadoop.hdfs.SnapshotDiffReportProto diffReport = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProtoOrBuilder> getDiffReportFieldBuilder() { if (diffReportBuilder_ == null) { diffReportBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProtoOrBuilder>( diffReport_, getParentForChildren(), isClean()); diffReport_ = null; } return diffReportBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetSnapshotDiffReportResponseProto) } static { defaultInstance = new GetSnapshotDiffReportResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetSnapshotDiffReportResponseProto) } public interface GetSnapshotDiffReportListingRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string snapshotRoot = 1; /** * required string snapshotRoot = 1; */ 
boolean hasSnapshotRoot(); /** * required string snapshotRoot = 1; */ java.lang.String getSnapshotRoot(); /** * required string snapshotRoot = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotRootBytes(); // required string fromSnapshot = 2; /** * required string fromSnapshot = 2; */ boolean hasFromSnapshot(); /** * required string fromSnapshot = 2; */ java.lang.String getFromSnapshot(); /** * required string fromSnapshot = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getFromSnapshotBytes(); // required string toSnapshot = 3; /** * required string toSnapshot = 3; */ boolean hasToSnapshot(); /** * required string toSnapshot = 3; */ java.lang.String getToSnapshot(); /** * required string toSnapshot = 3; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getToSnapshotBytes(); // optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 4; /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 4; */ boolean hasCursor(); /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto getCursor(); /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder getCursorOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.GetSnapshotDiffReportListingRequestProto} */ public static final class GetSnapshotDiffReportListingRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetSnapshotDiffReportListingRequestProtoOrBuilder { // Use GetSnapshotDiffReportListingRequestProto.newBuilder() to construct. private GetSnapshotDiffReportListingRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetSnapshotDiffReportListingRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetSnapshotDiffReportListingRequestProto defaultInstance; public static GetSnapshotDiffReportListingRequestProto getDefaultInstance() { return defaultInstance; } public GetSnapshotDiffReportListingRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetSnapshotDiffReportListingRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; snapshotRoot_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; fromSnapshot_ = input.readBytes(); break; } case 26: { 
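// Each case label in this parse loop is a protobuf wire tag:
// tag = (field_number << 3) | wire_type, and wire type 2 means
// length-delimited. So 10 is field 1 (snapshotRoot), 18 is field 2
// (fromSnapshot), 26 is field 3 (toSnapshot, handled here) and 34 is
// field 4, the optional cursor message used to resume a paged
// snapshot-diff listing from where the previous response left off.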
bitField0_ |= 0x00000004; toSnapshot_ = input.readBytes(); break; } case 34: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder subBuilder = null; if (((bitField0_ & 0x00000008) == 0x00000008)) { subBuilder = cursor_.toBuilder(); } cursor_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(cursor_); cursor_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000008; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshotDiffReportListingRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshotDiffReportListingRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetSnapshotDiffReportListingRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetSnapshotDiffReportListingRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string snapshotRoot = 1; public static final int SNAPSHOTROOT_FIELD_NUMBER = 1; private java.lang.Object snapshotRoot_; /** * required string snapshotRoot = 1; */ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string snapshotRoot = 1; */ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotRoot_ = s; } return s; } } /** * required string snapshotRoot = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); 
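// String fields are stored as either java.lang.String or ByteString and
// converted lazily: the first accessor needing the other representation
// converts via UTF-8 and caches the result back into the field (the
// String form is cached only when the bytes are valid UTF-8), so later
// calls avoid re-encoding.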
snapshotRoot_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required string fromSnapshot = 2; public static final int FROMSNAPSHOT_FIELD_NUMBER = 2; private java.lang.Object fromSnapshot_; /** * required string fromSnapshot = 2; */ public boolean hasFromSnapshot() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string fromSnapshot = 2; */ public java.lang.String getFromSnapshot() { java.lang.Object ref = fromSnapshot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { fromSnapshot_ = s; } return s; } } /** * required string fromSnapshot = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getFromSnapshotBytes() { java.lang.Object ref = fromSnapshot_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); fromSnapshot_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required string toSnapshot = 3; public static final int TOSNAPSHOT_FIELD_NUMBER = 3; private java.lang.Object toSnapshot_; /** * required string toSnapshot = 3; */ public boolean hasToSnapshot() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string toSnapshot = 3; */ public java.lang.String getToSnapshot() { java.lang.Object ref = toSnapshot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { toSnapshot_ = s; } return s; } } /** * required string toSnapshot = 3; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getToSnapshotBytes() { java.lang.Object ref = toSnapshot_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); toSnapshot_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 4; public static final int CURSOR_FIELD_NUMBER = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto cursor_; /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 4; */ public boolean hasCursor() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto getCursor() { return cursor_; } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder getCursorOrBuilder() { return cursor_; } private void initFields() { snapshotRoot_ = ""; fromSnapshot_ = ""; toSnapshot_ = ""; cursor_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = 
memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSnapshotRoot()) { memoizedIsInitialized = 0; return false; } if (!hasFromSnapshot()) { memoizedIsInitialized = 0; return false; } if (!hasToSnapshot()) { memoizedIsInitialized = 0; return false; } if (hasCursor()) { if (!getCursor().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSnapshotRootBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getFromSnapshotBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, getToSnapshotBytes()); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeMessage(4, cursor_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSnapshotRootBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(2, getFromSnapshotBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(3, getToSnapshotBytes()); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(4, cursor_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto) obj; boolean result = true; result = result && (hasSnapshotRoot() == other.hasSnapshotRoot()); if (hasSnapshotRoot()) { result = result && getSnapshotRoot() .equals(other.getSnapshotRoot()); } result = result && (hasFromSnapshot() == other.hasFromSnapshot()); if (hasFromSnapshot()) { result = result && getFromSnapshot() .equals(other.getFromSnapshot()); } result = result && (hasToSnapshot() == other.hasToSnapshot()); if (hasToSnapshot()) { result = result && getToSnapshot() .equals(other.getToSnapshot()); } result = result && (hasCursor() == other.hasCursor()); if (hasCursor()) { result = result && getCursor() .equals(other.getCursor()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSnapshotRoot()) { hash = (37 * 
hash) + SNAPSHOTROOT_FIELD_NUMBER; hash = (53 * hash) + getSnapshotRoot().hashCode(); } if (hasFromSnapshot()) { hash = (37 * hash) + FROMSNAPSHOT_FIELD_NUMBER; hash = (53 * hash) + getFromSnapshot().hashCode(); } if (hasToSnapshot()) { hash = (37 * hash) + TOSNAPSHOT_FIELD_NUMBER; hash = (53 * hash) + getToSnapshot().hashCode(); } if (hasCursor()) { hash = (37 * hash) + CURSOR_FIELD_NUMBER; hash = (53 * hash) + getCursor().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream 
input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetSnapshotDiffReportListingRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshotDiffReportListingRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshotDiffReportListingRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getCursorFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); snapshotRoot_ = ""; bitField0_ = (bitField0_ & ~0x00000001); fromSnapshot_ = ""; bitField0_ = (bitField0_ & ~0x00000002); toSnapshot_ = ""; bitField0_ = (bitField0_ & ~0x00000004); if (cursorBuilder_ == null) { cursor_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance(); } else { cursorBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshotDiffReportListingRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto getDefaultInstanceForType() { return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.snapshotRoot_ = snapshotRoot_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.fromSnapshot_ = fromSnapshot_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.toSnapshot_ = toSnapshot_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } if (cursorBuilder_ == null) { result.cursor_ = cursor_; } else { result.cursor_ = cursorBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto.getDefaultInstance()) return this; if (other.hasSnapshotRoot()) { bitField0_ |= 0x00000001; snapshotRoot_ = other.snapshotRoot_; onChanged(); } if (other.hasFromSnapshot()) { bitField0_ |= 0x00000002; fromSnapshot_ = other.fromSnapshot_; onChanged(); } if (other.hasToSnapshot()) { bitField0_ |= 0x00000004; toSnapshot_ = other.toSnapshot_; onChanged(); } if (other.hasCursor()) { mergeCursor(other.getCursor()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSnapshotRoot()) { return false; } if (!hasFromSnapshot()) { return false; } if (!hasToSnapshot()) { return false; } if (hasCursor()) { if (!getCursor().isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = 
(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string snapshotRoot = 1; private java.lang.Object snapshotRoot_ = ""; /** * required string snapshotRoot = 1; */ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string snapshotRoot = 1; */ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); snapshotRoot_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string snapshotRoot = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string snapshotRoot = 1; */ public Builder setSnapshotRoot( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotRoot_ = value; onChanged(); return this; } /** * required string snapshotRoot = 1; */ public Builder clearSnapshotRoot() { bitField0_ = (bitField0_ & ~0x00000001); snapshotRoot_ = getDefaultInstance().getSnapshotRoot(); onChanged(); return this; } /** * required string snapshotRoot = 1; */ public Builder setSnapshotRootBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotRoot_ = value; onChanged(); return this; } // required string fromSnapshot = 2; private java.lang.Object fromSnapshot_ = ""; /** * required string fromSnapshot = 2; */ public boolean hasFromSnapshot() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string fromSnapshot = 2; */ public java.lang.String getFromSnapshot() { java.lang.Object ref = fromSnapshot_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); fromSnapshot_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string fromSnapshot = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getFromSnapshotBytes() { java.lang.Object ref = fromSnapshot_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); fromSnapshot_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string fromSnapshot = 2; */ public Builder setFromSnapshot( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; fromSnapshot_ = value; onChanged(); return this; } /** * required string fromSnapshot = 2; */ public Builder clearFromSnapshot() { bitField0_ = (bitField0_ & ~0x00000002); fromSnapshot_ = getDefaultInstance().getFromSnapshot(); onChanged(); return this; } /** * required string fromSnapshot = 2; */ 
public Builder setFromSnapshotBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; fromSnapshot_ = value; onChanged(); return this; } // required string toSnapshot = 3; private java.lang.Object toSnapshot_ = ""; /** * required string toSnapshot = 3; */ public boolean hasToSnapshot() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string toSnapshot = 3; */ public java.lang.String getToSnapshot() { java.lang.Object ref = toSnapshot_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); toSnapshot_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string toSnapshot = 3; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getToSnapshotBytes() { java.lang.Object ref = toSnapshot_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); toSnapshot_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string toSnapshot = 3; */ public Builder setToSnapshot( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; toSnapshot_ = value; onChanged(); return this; } /** * required string toSnapshot = 3; */ public Builder clearToSnapshot() { bitField0_ = (bitField0_ & ~0x00000004); toSnapshot_ = getDefaultInstance().getToSnapshot(); onChanged(); return this; } /** * required string toSnapshot = 3; */ public Builder setToSnapshotBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; toSnapshot_ = value; onChanged(); return this; } // optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto cursor_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder> cursorBuilder_; /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 4; */ public boolean hasCursor() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto getCursor() { if (cursorBuilder_ == null) { return cursor_; } else { return cursorBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 4; */ public Builder setCursor(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto value) { if (cursorBuilder_ == null) { if (value == null) { throw new NullPointerException(); } cursor_ = value; onChanged(); } else { cursorBuilder_.setMessage(value); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 4; */ public Builder setCursor( 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder builderForValue) { if (cursorBuilder_ == null) { cursor_ = builderForValue.build(); onChanged(); } else { cursorBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 4; */ public Builder mergeCursor(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto value) { if (cursorBuilder_ == null) { if (((bitField0_ & 0x00000008) == 0x00000008) && cursor_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance()) { cursor_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.newBuilder(cursor_).mergeFrom(value).buildPartial(); } else { cursor_ = value; } onChanged(); } else { cursorBuilder_.mergeFrom(value); } bitField0_ |= 0x00000008; return this; } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 4; */ public Builder clearCursor() { if (cursorBuilder_ == null) { cursor_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance(); onChanged(); } else { cursorBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); return this; } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder getCursorBuilder() { bitField0_ |= 0x00000008; onChanged(); return getCursorFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder getCursorOrBuilder() { if (cursorBuilder_ != null) { return cursorBuilder_.getMessageOrBuilder(); } else { return cursor_; } } /** * optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 4; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder> getCursorFieldBuilder() { if (cursorBuilder_ == null) { cursorBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder>( cursor_, getParentForChildren(), isClean()); cursor_ = null; } return cursorBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetSnapshotDiffReportListingRequestProto) } static { defaultInstance = new GetSnapshotDiffReportListingRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetSnapshotDiffReportListingRequestProto) } public interface GetSnapshotDiffReportListingResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.SnapshotDiffReportListingProto diffReport = 1; /** * required .hadoop.hdfs.SnapshotDiffReportListingProto diffReport = 1; */ boolean hasDiffReport(); /** * required .hadoop.hdfs.SnapshotDiffReportListingProto diffReport = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto 
getDiffReport(); /** * required .hadoop.hdfs.SnapshotDiffReportListingProto diffReport = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProtoOrBuilder getDiffReportOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.GetSnapshotDiffReportListingResponseProto} */ public static final class GetSnapshotDiffReportListingResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetSnapshotDiffReportListingResponseProtoOrBuilder { // Use GetSnapshotDiffReportListingResponseProto.newBuilder() to construct. private GetSnapshotDiffReportListingResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetSnapshotDiffReportListingResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetSnapshotDiffReportListingResponseProto defaultInstance; public static GetSnapshotDiffReportListingResponseProto getDefaultInstance() { return defaultInstance; } public GetSnapshotDiffReportListingResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetSnapshotDiffReportListingResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = diffReport_.toBuilder(); } diffReport_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(diffReport_); diffReport_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshotDiffReportListingResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshotDiffReportListingResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetSnapshotDiffReportListingResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetSnapshotDiffReportListingResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.SnapshotDiffReportListingProto diffReport = 1; public static final int DIFFREPORT_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto diffReport_; /** * required .hadoop.hdfs.SnapshotDiffReportListingProto diffReport = 1; */ public boolean hasDiffReport() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.SnapshotDiffReportListingProto diffReport = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto getDiffReport() { return diffReport_; } /** * required .hadoop.hdfs.SnapshotDiffReportListingProto diffReport = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProtoOrBuilder getDiffReportOrBuilder() { return diffReport_; } private void initFields() { diffReport_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasDiffReport()) { memoizedIsInitialized = 0; return false; } if (!getDiffReport().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, diffReport_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, diffReport_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto) obj; boolean result = true; result = result && (hasDiffReport() == other.hasDiffReport()); if (hasDiffReport()) { result = result && getDiffReport() .equals(other.getDiffReport()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasDiffReport()) { hash = (37 * hash) + DIFFREPORT_FIELD_NUMBER; hash = (53 * hash) + getDiffReport().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { 
return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetSnapshotDiffReportListingResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshotDiffReportListingResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshotDiffReportListingResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getDiffReportFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (diffReportBuilder_ == null) { diffReport_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.getDefaultInstance(); } else { diffReportBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetSnapshotDiffReportListingResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (diffReportBuilder_ == null) { result.diffReport_ = diffReport_; } else { result.diffReport_ = diffReportBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto.getDefaultInstance()) return this; if (other.hasDiffReport()) { mergeDiffReport(other.getDiffReport()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasDiffReport()) { return false; } if (!getDiffReport().isInitialized()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.SnapshotDiffReportListingProto diffReport = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto diffReport_ = 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProtoOrBuilder> diffReportBuilder_; /** * required .hadoop.hdfs.SnapshotDiffReportListingProto diffReport = 1; */ public boolean hasDiffReport() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.SnapshotDiffReportListingProto diffReport = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto getDiffReport() { if (diffReportBuilder_ == null) { return diffReport_; } else { return diffReportBuilder_.getMessage(); } } /** * required .hadoop.hdfs.SnapshotDiffReportListingProto diffReport = 1; */ public Builder setDiffReport(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto value) { if (diffReportBuilder_ == null) { if (value == null) { throw new NullPointerException(); } diffReport_ = value; onChanged(); } else { diffReportBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.SnapshotDiffReportListingProto diffReport = 1; */ public Builder setDiffReport( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.Builder builderForValue) { if (diffReportBuilder_ == null) { diffReport_ = builderForValue.build(); onChanged(); } else { diffReportBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.SnapshotDiffReportListingProto diffReport = 1; */ public Builder mergeDiffReport(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto value) { if (diffReportBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && diffReport_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.getDefaultInstance()) { diffReport_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.newBuilder(diffReport_).mergeFrom(value).buildPartial(); } else { diffReport_ = value; } onChanged(); } else { diffReportBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.SnapshotDiffReportListingProto diffReport = 1; */ public Builder clearDiffReport() { if (diffReportBuilder_ == null) { diffReport_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.getDefaultInstance(); onChanged(); } else { diffReportBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.SnapshotDiffReportListingProto diffReport = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.Builder getDiffReportBuilder() { bitField0_ |= 0x00000001; onChanged(); return getDiffReportFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.SnapshotDiffReportListingProto diffReport = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProtoOrBuilder getDiffReportOrBuilder() { if (diffReportBuilder_ != null) { return diffReportBuilder_.getMessageOrBuilder(); } else { return diffReport_; } } /** * required .hadoop.hdfs.SnapshotDiffReportListingProto diffReport = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProtoOrBuilder> getDiffReportFieldBuilder() { if (diffReportBuilder_ == null) { diffReportBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProtoOrBuilder>( diffReport_, getParentForChildren(), isClean()); diffReport_ = null; } return diffReportBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetSnapshotDiffReportListingResponseProto) } static { defaultInstance = new GetSnapshotDiffReportListingResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetSnapshotDiffReportListingResponseProto) } public interface RenewLeaseRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string clientName = 1; /** * required string clientName = 1; */ boolean hasClientName(); /** * required string clientName = 1; */ java.lang.String getClientName(); /** * required string clientName = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes(); } /** * Protobuf type {@code hadoop.hdfs.RenewLeaseRequestProto} */ public static final class RenewLeaseRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements RenewLeaseRequestProtoOrBuilder { // Use RenewLeaseRequestProto.newBuilder() to construct. 
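  // Illustrative sketch, not part of the generated code: a minimal way a
  // client might assemble this message through the builder API defined
  // below. The client-name literal is a hypothetical placeholder; since
  // clientName is a required field, build() throws if it is never set.
  private static RenewLeaseRequestProto exampleRenewLeaseRequest() {
    return RenewLeaseRequestProto.newBuilder()
        .setClientName("DFSClient_example") // hypothetical client identifier
        .build();
  }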
private RenewLeaseRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RenewLeaseRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RenewLeaseRequestProto defaultInstance; public static RenewLeaseRequestProto getDefaultInstance() { return defaultInstance; } public RenewLeaseRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RenewLeaseRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; clientName_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenewLeaseRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenewLeaseRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public RenewLeaseRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new RenewLeaseRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string clientName = 1; public static final int CLIENTNAME_FIELD_NUMBER = 1; private 
java.lang.Object clientName_; /** * required string clientName = 1; */ public boolean hasClientName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string clientName = 1; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { clientName_ = s; } return s; } } /** * required string clientName = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { clientName_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasClientName()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getClientNameBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getClientNameBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto) obj; boolean result = true; result = result && (hasClientName() == other.hasClientName()); if (hasClientName()) { result = result && getClientName() .equals(other.getClientName()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasClientName()) { hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER; hash = (53 * hash) + getClientName().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto parseFrom( 
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RenewLeaseRequestProto} */ public static final 
class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenewLeaseRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenewLeaseRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); clientName_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenewLeaseRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.clientName_ = clientName_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto 
other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto.getDefaultInstance()) return this; if (other.hasClientName()) { bitField0_ |= 0x00000001; clientName_ = other.clientName_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasClientName()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string clientName = 1; private java.lang.Object clientName_ = ""; /** * required string clientName = 1; */ public boolean hasClientName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string clientName = 1; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); clientName_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string clientName = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string clientName = 1; */ public Builder setClientName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; clientName_ = value; onChanged(); return this; } /** * required string clientName = 1; */ public Builder clearClientName() { bitField0_ = (bitField0_ & ~0x00000001); clientName_ = getDefaultInstance().getClientName(); onChanged(); return this; } /** * required string clientName = 1; */ public Builder setClientNameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; clientName_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RenewLeaseRequestProto) } static { defaultInstance = new RenewLeaseRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RenewLeaseRequestProto) } public interface RenewLeaseResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.RenewLeaseResponseProto} * *
   * void response
   * 
*/ public static final class RenewLeaseResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements RenewLeaseResponseProtoOrBuilder { // Use RenewLeaseResponseProto.newBuilder() to construct. private RenewLeaseResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RenewLeaseResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RenewLeaseResponseProto defaultInstance; public static RenewLeaseResponseProto getDefaultInstance() { return defaultInstance; } public RenewLeaseResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RenewLeaseResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenewLeaseResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenewLeaseResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public RenewLeaseResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new RenewLeaseResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; 
} private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, 
extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RenewLeaseResponseProto} * *
     * void response
     * 
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenewLeaseResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenewLeaseResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenewLeaseResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final 
boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RenewLeaseResponseProto) } static { defaultInstance = new RenewLeaseResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RenewLeaseResponseProto) } public interface RecoverLeaseRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; */ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); // required string clientName = 2; /** * required string clientName = 2; */ boolean hasClientName(); /** * required string clientName = 2; */ java.lang.String getClientName(); /** * required string clientName = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes(); } /** * Protobuf type {@code hadoop.hdfs.RecoverLeaseRequestProto} */ public static final class RecoverLeaseRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements RecoverLeaseRequestProtoOrBuilder { // Use RecoverLeaseRequestProto.newBuilder() to construct. 
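  // Illustrative sketch, not part of the generated code: both src and
  // clientName are required fields of this message, so build() throws an
  // UninitializedMessageException if either is left unset. The builder
  // setters follow the standard generated pattern; the path and client-name
  // literals are hypothetical placeholders.
  private static RecoverLeaseRequestProto exampleRecoverLeaseRequest() {
    return RecoverLeaseRequestProto.newBuilder()
        .setSrc("/tmp/example-file")        // hypothetical HDFS path
        .setClientName("DFSClient_example") // hypothetical client identifier
        .build();
  }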
private RecoverLeaseRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RecoverLeaseRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RecoverLeaseRequestProto defaultInstance; public static RecoverLeaseRequestProto getDefaultInstance() { return defaultInstance; } public RecoverLeaseRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RecoverLeaseRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; clientName_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RecoverLeaseRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RecoverLeaseRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public RecoverLeaseRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new RecoverLeaseRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required 
string src = 1; public static final int SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required string clientName = 2; public static final int CLIENTNAME_FIELD_NUMBER = 2; private java.lang.Object clientName_; /** * required string clientName = 2; */ public boolean hasClientName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string clientName = 2; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { clientName_ = s; } return s; } } /** * required string clientName = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { src_ = ""; clientName_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } if (!hasClientName()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getClientNameBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(2, getClientNameBytes()); } size += getUnknownFields().getSerializedSize(); 
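      // Cache the computed size: memoizedSerializedSize starts at -1 ("not yet
      // computed"), and writeTo() calls getSerializedSize() again before writing,
      // so memoizing avoids recomputing the field sizes on every serialization.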
memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && (hasClientName() == other.hasClientName()); if (hasClientName()) { result = result && getClientName() .equals(other.getClientName()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } if (hasClientName()) { hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER; hash = (53 * hash) + getClientName().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto 
parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RecoverLeaseRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RecoverLeaseRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RecoverLeaseRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); clientName_ = ""; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public 
io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RecoverLeaseRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.clientName_ = clientName_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } if (other.hasClientName()) { bitField0_ |= 0x00000002; clientName_ = other.clientName_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } if (!hasClientName()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof 
java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // required string clientName = 2; private java.lang.Object clientName_ = ""; /** * required string clientName = 2; */ public boolean hasClientName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string clientName = 2; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); clientName_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string clientName = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string clientName = 2; */ public Builder setClientName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; clientName_ = value; onChanged(); return this; } /** * required string clientName = 2; */ public Builder clearClientName() { bitField0_ = (bitField0_ & ~0x00000002); clientName_ = getDefaultInstance().getClientName(); onChanged(); return this; } /** * required string clientName = 2; */ public Builder setClientNameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; clientName_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RecoverLeaseRequestProto) } static { defaultInstance = new RecoverLeaseRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RecoverLeaseRequestProto) } public interface RecoverLeaseResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required bool result = 1; /** * required bool result = 1; */ boolean hasResult(); /** * required bool result = 1; */ boolean getResult(); } /** * Protobuf type {@code 
hadoop.hdfs.RecoverLeaseResponseProto} */ public static final class RecoverLeaseResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements RecoverLeaseResponseProtoOrBuilder { // Use RecoverLeaseResponseProto.newBuilder() to construct. private RecoverLeaseResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RecoverLeaseResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RecoverLeaseResponseProto defaultInstance; public static RecoverLeaseResponseProto getDefaultInstance() { return defaultInstance; } public RecoverLeaseResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RecoverLeaseResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; result_ = input.readBool(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RecoverLeaseResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RecoverLeaseResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public RecoverLeaseResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new 
RecoverLeaseResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required bool result = 1; public static final int RESULT_FIELD_NUMBER = 1; private boolean result_; /** * required bool result = 1; */ public boolean hasResult() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool result = 1; */ public boolean getResult() { return result_; } private void initFields() { result_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasResult()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBool(1, result_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(1, result_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto) obj; boolean result = true; result = result && (hasResult() == other.hasResult()); if (hasResult()) { result = result && (getResult() == other.getResult()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasResult()) { hash = (37 * hash) + RESULT_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getResult()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RecoverLeaseResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RecoverLeaseResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable 
internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RecoverLeaseResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); result_ = false; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RecoverLeaseResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.result_ = result_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.getDefaultInstance()) return this; if (other.hasResult()) { setResult(other.getResult()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasResult()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required bool result = 1; private boolean result_ ; /** * required bool result = 1; */ public boolean hasResult() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool result = 1; */ public boolean getResult() { return result_; } /** * required bool result = 1; */ public Builder setResult(boolean value) { bitField0_ |= 0x00000001; result_ = value; onChanged(); return this; } /** * required bool result = 1; */ public Builder clearResult() { bitField0_ = (bitField0_ & ~0x00000001); result_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RecoverLeaseResponseProto) } static { defaultInstance = new RecoverLeaseResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RecoverLeaseResponseProto) } public interface GetFsStatusRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.GetFsStatusRequestProto} * *
   * no input parameters
   * 
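   * (an empty request message; it pairs with the GetFsStatsResponseProto defined below)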
*/ public static final class GetFsStatusRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetFsStatusRequestProtoOrBuilder { // Use GetFsStatusRequestProto.newBuilder() to construct. private GetFsStatusRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetFsStatusRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetFsStatusRequestProto defaultInstance; public static GetFsStatusRequestProto getDefaultInstance() { return defaultInstance; } public GetFsStatusRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetFsStatusRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsStatusRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsStatusRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetFsStatusRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetFsStatusRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; 
} private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, 
extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetFsStatusRequestProto} * *
     * no input parameters
     * 
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsStatusRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsStatusRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsStatusRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final 
boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetFsStatusRequestProto) } static { defaultInstance = new GetFsStatusRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetFsStatusRequestProto) } public interface GetFsStatsResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required uint64 capacity = 1; /** * required uint64 capacity = 1; */ boolean hasCapacity(); /** * required uint64 capacity = 1; */ long getCapacity(); // required uint64 used = 2; /** * required uint64 used = 2; */ boolean hasUsed(); /** * required uint64 used = 2; */ long getUsed(); // required uint64 remaining = 3; /** * required uint64 remaining = 3; */ boolean hasRemaining(); /** * required uint64 remaining = 3; */ long getRemaining(); // required uint64 under_replicated = 4; /** * required uint64 under_replicated = 4; */ boolean hasUnderReplicated(); /** * required uint64 under_replicated = 4; */ long getUnderReplicated(); // required uint64 corrupt_blocks = 5; /** * required uint64 corrupt_blocks = 5; */ boolean hasCorruptBlocks(); /** * required uint64 corrupt_blocks = 5; */ long getCorruptBlocks(); // required uint64 missing_blocks = 6; /** * required uint64 missing_blocks = 6; */ boolean hasMissingBlocks(); /** * required uint64 missing_blocks = 6; */ long getMissingBlocks(); // optional uint64 missing_repl_one_blocks = 7; /** * optional uint64 missing_repl_one_blocks = 7; */ boolean hasMissingReplOneBlocks(); /** * optional uint64 missing_repl_one_blocks = 7; */ long getMissingReplOneBlocks(); // optional uint64 blocks_in_future = 8; /** * optional uint64 blocks_in_future = 8; */ boolean hasBlocksInFuture(); /** * optional uint64 blocks_in_future = 8; */ long getBlocksInFuture(); // optional uint64 pending_deletion_blocks = 9; /** * optional uint64 pending_deletion_blocks = 9; */ boolean hasPendingDeletionBlocks(); /** * optional uint64 pending_deletion_blocks = 9; */ long getPendingDeletionBlocks(); } /** * Protobuf type {@code hadoop.hdfs.GetFsStatsResponseProto} */ public static final class GetFsStatsResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetFsStatsResponseProtoOrBuilder { // Use GetFsStatsResponseProto.newBuilder() to construct. 
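    // A hedged usage sketch, not part of the generated file: the six counters
    // tagged "required" must all be set before build() succeeds, while fields
    // 7-9 are optional and can be probed with their has*() accessors. The
    // numbers below are made up for illustration.
    //
    //   GetFsStatsResponseProto stats = GetFsStatsResponseProto.newBuilder()
    //       .setCapacity(1000L)        // required uint64 capacity = 1
    //       .setUsed(400L)             // required uint64 used = 2
    //       .setRemaining(600L)        // required uint64 remaining = 3
    //       .setUnderReplicated(0L)    // required uint64 under_replicated = 4
    //       .setCorruptBlocks(0L)      // required uint64 corrupt_blocks = 5
    //       .setMissingBlocks(0L)      // required uint64 missing_blocks = 6
    //       .build();
    //   long free = stats.getRemaining();                     // 600
    //   boolean hasReplOne = stats.hasMissingReplOneBlocks(); // false: optional, unset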
private GetFsStatsResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetFsStatsResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetFsStatsResponseProto defaultInstance; public static GetFsStatsResponseProto getDefaultInstance() { return defaultInstance; } public GetFsStatsResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetFsStatsResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; capacity_ = input.readUInt64(); break; } case 16: { bitField0_ |= 0x00000002; used_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; remaining_ = input.readUInt64(); break; } case 32: { bitField0_ |= 0x00000008; underReplicated_ = input.readUInt64(); break; } case 40: { bitField0_ |= 0x00000010; corruptBlocks_ = input.readUInt64(); break; } case 48: { bitField0_ |= 0x00000020; missingBlocks_ = input.readUInt64(); break; } case 56: { bitField0_ |= 0x00000040; missingReplOneBlocks_ = input.readUInt64(); break; } case 64: { bitField0_ |= 0x00000080; blocksInFuture_ = input.readUInt64(); break; } case 72: { bitField0_ |= 0x00000100; pendingDeletionBlocks_ = input.readUInt64(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsStatsResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsStatsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new 
io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetFsStatsResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetFsStatsResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required uint64 capacity = 1; public static final int CAPACITY_FIELD_NUMBER = 1; private long capacity_; /** * required uint64 capacity = 1; */ public boolean hasCapacity() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 capacity = 1; */ public long getCapacity() { return capacity_; } // required uint64 used = 2; public static final int USED_FIELD_NUMBER = 2; private long used_; /** * required uint64 used = 2; */ public boolean hasUsed() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 used = 2; */ public long getUsed() { return used_; } // required uint64 remaining = 3; public static final int REMAINING_FIELD_NUMBER = 3; private long remaining_; /** * required uint64 remaining = 3; */ public boolean hasRemaining() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 remaining = 3; */ public long getRemaining() { return remaining_; } // required uint64 under_replicated = 4; public static final int UNDER_REPLICATED_FIELD_NUMBER = 4; private long underReplicated_; /** * required uint64 under_replicated = 4; */ public boolean hasUnderReplicated() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required uint64 under_replicated = 4; */ public long getUnderReplicated() { return underReplicated_; } // required uint64 corrupt_blocks = 5; public static final int CORRUPT_BLOCKS_FIELD_NUMBER = 5; private long corruptBlocks_; /** * required uint64 corrupt_blocks = 5; */ public boolean hasCorruptBlocks() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required uint64 corrupt_blocks = 5; */ public long getCorruptBlocks() { return corruptBlocks_; } // required uint64 missing_blocks = 6; public static final int MISSING_BLOCKS_FIELD_NUMBER = 6; private long missingBlocks_; /** * required uint64 missing_blocks = 6; */ public boolean hasMissingBlocks() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * required uint64 missing_blocks = 6; */ public long getMissingBlocks() { return missingBlocks_; } // optional uint64 missing_repl_one_blocks = 7; public static final int MISSING_REPL_ONE_BLOCKS_FIELD_NUMBER = 7; private long missingReplOneBlocks_; /** * optional uint64 missing_repl_one_blocks = 7; */ public boolean hasMissingReplOneBlocks() { return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional uint64 missing_repl_one_blocks = 7; */ public long getMissingReplOneBlocks() { return missingReplOneBlocks_; } // optional uint64 blocks_in_future = 8; public static final int BLOCKS_IN_FUTURE_FIELD_NUMBER = 8; private long blocksInFuture_; /** * optional uint64 blocks_in_future = 8; */ public boolean hasBlocksInFuture() { return ((bitField0_ & 0x00000080) == 0x00000080); } /** * optional uint64 blocks_in_future = 8; */ public long getBlocksInFuture() { return blocksInFuture_; } // optional uint64 pending_deletion_blocks = 9; public static final int PENDING_DELETION_BLOCKS_FIELD_NUMBER = 9; private long 
pendingDeletionBlocks_; /** * optional uint64 pending_deletion_blocks = 9; */ public boolean hasPendingDeletionBlocks() { return ((bitField0_ & 0x00000100) == 0x00000100); } /** * optional uint64 pending_deletion_blocks = 9; */ public long getPendingDeletionBlocks() { return pendingDeletionBlocks_; } private void initFields() { capacity_ = 0L; used_ = 0L; remaining_ = 0L; underReplicated_ = 0L; corruptBlocks_ = 0L; missingBlocks_ = 0L; missingReplOneBlocks_ = 0L; blocksInFuture_ = 0L; pendingDeletionBlocks_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasCapacity()) { memoizedIsInitialized = 0; return false; } if (!hasUsed()) { memoizedIsInitialized = 0; return false; } if (!hasRemaining()) { memoizedIsInitialized = 0; return false; } if (!hasUnderReplicated()) { memoizedIsInitialized = 0; return false; } if (!hasCorruptBlocks()) { memoizedIsInitialized = 0; return false; } if (!hasMissingBlocks()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(1, capacity_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, used_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(3, remaining_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeUInt64(4, underReplicated_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeUInt64(5, corruptBlocks_); } if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeUInt64(6, missingBlocks_); } if (((bitField0_ & 0x00000040) == 0x00000040)) { output.writeUInt64(7, missingReplOneBlocks_); } if (((bitField0_ & 0x00000080) == 0x00000080)) { output.writeUInt64(8, blocksInFuture_); } if (((bitField0_ & 0x00000100) == 0x00000100)) { output.writeUInt64(9, pendingDeletionBlocks_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(1, capacity_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(2, used_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(3, remaining_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(4, underReplicated_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(5, corruptBlocks_); } if (((bitField0_ & 0x00000020) == 0x00000020)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(6, missingBlocks_); } if (((bitField0_ & 0x00000040) == 0x00000040)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(7, missingReplOneBlocks_); } if (((bitField0_ & 0x00000080) == 0x00000080)) { size += 
io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(8, blocksInFuture_); } if (((bitField0_ & 0x00000100) == 0x00000100)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(9, pendingDeletionBlocks_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto) obj; boolean result = true; result = result && (hasCapacity() == other.hasCapacity()); if (hasCapacity()) { result = result && (getCapacity() == other.getCapacity()); } result = result && (hasUsed() == other.hasUsed()); if (hasUsed()) { result = result && (getUsed() == other.getUsed()); } result = result && (hasRemaining() == other.hasRemaining()); if (hasRemaining()) { result = result && (getRemaining() == other.getRemaining()); } result = result && (hasUnderReplicated() == other.hasUnderReplicated()); if (hasUnderReplicated()) { result = result && (getUnderReplicated() == other.getUnderReplicated()); } result = result && (hasCorruptBlocks() == other.hasCorruptBlocks()); if (hasCorruptBlocks()) { result = result && (getCorruptBlocks() == other.getCorruptBlocks()); } result = result && (hasMissingBlocks() == other.hasMissingBlocks()); if (hasMissingBlocks()) { result = result && (getMissingBlocks() == other.getMissingBlocks()); } result = result && (hasMissingReplOneBlocks() == other.hasMissingReplOneBlocks()); if (hasMissingReplOneBlocks()) { result = result && (getMissingReplOneBlocks() == other.getMissingReplOneBlocks()); } result = result && (hasBlocksInFuture() == other.hasBlocksInFuture()); if (hasBlocksInFuture()) { result = result && (getBlocksInFuture() == other.getBlocksInFuture()); } result = result && (hasPendingDeletionBlocks() == other.hasPendingDeletionBlocks()); if (hasPendingDeletionBlocks()) { result = result && (getPendingDeletionBlocks() == other.getPendingDeletionBlocks()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasCapacity()) { hash = (37 * hash) + CAPACITY_FIELD_NUMBER; hash = (53 * hash) + hashLong(getCapacity()); } if (hasUsed()) { hash = (37 * hash) + USED_FIELD_NUMBER; hash = (53 * hash) + hashLong(getUsed()); } if (hasRemaining()) { hash = (37 * hash) + REMAINING_FIELD_NUMBER; hash = (53 * hash) + hashLong(getRemaining()); } if (hasUnderReplicated()) { hash = (37 * hash) + UNDER_REPLICATED_FIELD_NUMBER; hash = (53 * hash) + hashLong(getUnderReplicated()); } if (hasCorruptBlocks()) { hash = (37 * hash) + CORRUPT_BLOCKS_FIELD_NUMBER; hash = (53 * hash) + hashLong(getCorruptBlocks()); } if (hasMissingBlocks()) { hash = (37 * hash) + MISSING_BLOCKS_FIELD_NUMBER; hash = (53 * hash) + hashLong(getMissingBlocks()); } if 
(hasMissingReplOneBlocks()) { hash = (37 * hash) + MISSING_REPL_ONE_BLOCKS_FIELD_NUMBER; hash = (53 * hash) + hashLong(getMissingReplOneBlocks()); } if (hasBlocksInFuture()) { hash = (37 * hash) + BLOCKS_IN_FUTURE_FIELD_NUMBER; hash = (53 * hash) + hashLong(getBlocksInFuture()); } if (hasPendingDeletionBlocks()) { hash = (37 * hash) + PENDING_DELETION_BLOCKS_FIELD_NUMBER; hash = (53 * hash) + hashLong(getPendingDeletionBlocks()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); 
} public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetFsStatsResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsStatsResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsStatsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); capacity_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); used_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); remaining_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); underReplicated_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); corruptBlocks_ = 0L; bitField0_ = (bitField0_ & ~0x00000010); missingBlocks_ = 0L; bitField0_ = (bitField0_ & ~0x00000020); missingReplOneBlocks_ = 0L; bitField0_ = (bitField0_ & ~0x00000040); blocksInFuture_ = 0L; bitField0_ = (bitField0_ & ~0x00000080); pendingDeletionBlocks_ = 0L; bitField0_ = (bitField0_ & ~0x00000100); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsStatsResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto build() { 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.capacity_ = capacity_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.used_ = used_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.remaining_ = remaining_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.underReplicated_ = underReplicated_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } result.corruptBlocks_ = corruptBlocks_; if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000020; } result.missingBlocks_ = missingBlocks_; if (((from_bitField0_ & 0x00000040) == 0x00000040)) { to_bitField0_ |= 0x00000040; } result.missingReplOneBlocks_ = missingReplOneBlocks_; if (((from_bitField0_ & 0x00000080) == 0x00000080)) { to_bitField0_ |= 0x00000080; } result.blocksInFuture_ = blocksInFuture_; if (((from_bitField0_ & 0x00000100) == 0x00000100)) { to_bitField0_ |= 0x00000100; } result.pendingDeletionBlocks_ = pendingDeletionBlocks_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.getDefaultInstance()) return this; if (other.hasCapacity()) { setCapacity(other.getCapacity()); } if (other.hasUsed()) { setUsed(other.getUsed()); } if (other.hasRemaining()) { setRemaining(other.getRemaining()); } if (other.hasUnderReplicated()) { setUnderReplicated(other.getUnderReplicated()); } if (other.hasCorruptBlocks()) { setCorruptBlocks(other.getCorruptBlocks()); } if (other.hasMissingBlocks()) { setMissingBlocks(other.getMissingBlocks()); } if (other.hasMissingReplOneBlocks()) { setMissingReplOneBlocks(other.getMissingReplOneBlocks()); } if (other.hasBlocksInFuture()) { setBlocksInFuture(other.getBlocksInFuture()); } if (other.hasPendingDeletionBlocks()) { setPendingDeletionBlocks(other.getPendingDeletionBlocks()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasCapacity()) { return false; } if (!hasUsed()) { return false; } if (!hasRemaining()) { return false; } if (!hasUnderReplicated()) { return false; } if (!hasCorruptBlocks()) { return false; } if (!hasMissingBlocks()) { return false; } return true; } public Builder mergeFrom( 
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required uint64 capacity = 1; private long capacity_ ; /** * required uint64 capacity = 1; */ public boolean hasCapacity() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 capacity = 1; */ public long getCapacity() { return capacity_; } /** * required uint64 capacity = 1; */ public Builder setCapacity(long value) { bitField0_ |= 0x00000001; capacity_ = value; onChanged(); return this; } /** * required uint64 capacity = 1; */ public Builder clearCapacity() { bitField0_ = (bitField0_ & ~0x00000001); capacity_ = 0L; onChanged(); return this; } // required uint64 used = 2; private long used_ ; /** * required uint64 used = 2; */ public boolean hasUsed() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 used = 2; */ public long getUsed() { return used_; } /** * required uint64 used = 2; */ public Builder setUsed(long value) { bitField0_ |= 0x00000002; used_ = value; onChanged(); return this; } /** * required uint64 used = 2; */ public Builder clearUsed() { bitField0_ = (bitField0_ & ~0x00000002); used_ = 0L; onChanged(); return this; } // required uint64 remaining = 3; private long remaining_ ; /** * required uint64 remaining = 3; */ public boolean hasRemaining() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 remaining = 3; */ public long getRemaining() { return remaining_; } /** * required uint64 remaining = 3; */ public Builder setRemaining(long value) { bitField0_ |= 0x00000004; remaining_ = value; onChanged(); return this; } /** * required uint64 remaining = 3; */ public Builder clearRemaining() { bitField0_ = (bitField0_ & ~0x00000004); remaining_ = 0L; onChanged(); return this; } // required uint64 under_replicated = 4; private long underReplicated_ ; /** * required uint64 under_replicated = 4; */ public boolean hasUnderReplicated() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required uint64 under_replicated = 4; */ public long getUnderReplicated() { return underReplicated_; } /** * required uint64 under_replicated = 4; */ public Builder setUnderReplicated(long value) { bitField0_ |= 0x00000008; underReplicated_ = value; onChanged(); return this; } /** * required uint64 under_replicated = 4; */ public Builder clearUnderReplicated() { bitField0_ = (bitField0_ & ~0x00000008); underReplicated_ = 0L; onChanged(); return this; } // required uint64 corrupt_blocks = 5; private long corruptBlocks_ ; /** * required uint64 corrupt_blocks = 5; */ public boolean hasCorruptBlocks() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required uint64 corrupt_blocks = 5; */ public long getCorruptBlocks() { return corruptBlocks_; } /** * required uint64 corrupt_blocks = 5; */ public Builder setCorruptBlocks(long value) { bitField0_ |= 0x00000010; corruptBlocks_ = value; onChanged(); 
return this; } /** * required uint64 corrupt_blocks = 5; */ public Builder clearCorruptBlocks() { bitField0_ = (bitField0_ & ~0x00000010); corruptBlocks_ = 0L; onChanged(); return this; } // required uint64 missing_blocks = 6; private long missingBlocks_ ; /** * required uint64 missing_blocks = 6; */ public boolean hasMissingBlocks() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * required uint64 missing_blocks = 6; */ public long getMissingBlocks() { return missingBlocks_; } /** * required uint64 missing_blocks = 6; */ public Builder setMissingBlocks(long value) { bitField0_ |= 0x00000020; missingBlocks_ = value; onChanged(); return this; } /** * required uint64 missing_blocks = 6; */ public Builder clearMissingBlocks() { bitField0_ = (bitField0_ & ~0x00000020); missingBlocks_ = 0L; onChanged(); return this; } // optional uint64 missing_repl_one_blocks = 7; private long missingReplOneBlocks_ ; /** * optional uint64 missing_repl_one_blocks = 7; */ public boolean hasMissingReplOneBlocks() { return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional uint64 missing_repl_one_blocks = 7; */ public long getMissingReplOneBlocks() { return missingReplOneBlocks_; } /** * optional uint64 missing_repl_one_blocks = 7; */ public Builder setMissingReplOneBlocks(long value) { bitField0_ |= 0x00000040; missingReplOneBlocks_ = value; onChanged(); return this; } /** * optional uint64 missing_repl_one_blocks = 7; */ public Builder clearMissingReplOneBlocks() { bitField0_ = (bitField0_ & ~0x00000040); missingReplOneBlocks_ = 0L; onChanged(); return this; } // optional uint64 blocks_in_future = 8; private long blocksInFuture_ ; /** * optional uint64 blocks_in_future = 8; */ public boolean hasBlocksInFuture() { return ((bitField0_ & 0x00000080) == 0x00000080); } /** * optional uint64 blocks_in_future = 8; */ public long getBlocksInFuture() { return blocksInFuture_; } /** * optional uint64 blocks_in_future = 8; */ public Builder setBlocksInFuture(long value) { bitField0_ |= 0x00000080; blocksInFuture_ = value; onChanged(); return this; } /** * optional uint64 blocks_in_future = 8; */ public Builder clearBlocksInFuture() { bitField0_ = (bitField0_ & ~0x00000080); blocksInFuture_ = 0L; onChanged(); return this; } // optional uint64 pending_deletion_blocks = 9; private long pendingDeletionBlocks_ ; /** * optional uint64 pending_deletion_blocks = 9; */ public boolean hasPendingDeletionBlocks() { return ((bitField0_ & 0x00000100) == 0x00000100); } /** * optional uint64 pending_deletion_blocks = 9; */ public long getPendingDeletionBlocks() { return pendingDeletionBlocks_; } /** * optional uint64 pending_deletion_blocks = 9; */ public Builder setPendingDeletionBlocks(long value) { bitField0_ |= 0x00000100; pendingDeletionBlocks_ = value; onChanged(); return this; } /** * optional uint64 pending_deletion_blocks = 9; */ public Builder clearPendingDeletionBlocks() { bitField0_ = (bitField0_ & ~0x00000100); pendingDeletionBlocks_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetFsStatsResponseProto) } static { defaultInstance = new GetFsStatsResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetFsStatsResponseProto) } public interface GetFsReplicatedBlockStatsRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.GetFsReplicatedBlockStatsRequestProto} * *
   * no input parameters
   * 
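   *
   * A minimal usage sketch, assuming only the generated members shown in
   * this class: because the message declares no fields, an empty builder
   * and the default instance are interchangeable, e.g.
   *
   *   GetFsReplicatedBlockStatsRequestProto req =
   *       GetFsReplicatedBlockStatsRequestProto.newBuilder().build();
   *   // equivalent to GetFsReplicatedBlockStatsRequestProto.getDefaultInstance()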
*/ public static final class GetFsReplicatedBlockStatsRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetFsReplicatedBlockStatsRequestProtoOrBuilder { // Use GetFsReplicatedBlockStatsRequestProto.newBuilder() to construct. private GetFsReplicatedBlockStatsRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetFsReplicatedBlockStatsRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetFsReplicatedBlockStatsRequestProto defaultInstance; public static GetFsReplicatedBlockStatsRequestProto getDefaultInstance() { return defaultInstance; } public GetFsReplicatedBlockStatsRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetFsReplicatedBlockStatsRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsReplicatedBlockStatsRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsReplicatedBlockStatsRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetFsReplicatedBlockStatsRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetFsReplicatedBlockStatsRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto 
parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetFsReplicatedBlockStatsRequestProto} * *
     * no input parameters
     * 
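     *
     * Since the enclosing message has no fields, this Builder's clear() and
     * mergeFrom(...) below only affect unknown fields, isInitialized() is
     * always true, and build() can never throw
     * newUninitializedMessageException here.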
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsReplicatedBlockStatsRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsReplicatedBlockStatsRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsReplicatedBlockStatsRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetFsReplicatedBlockStatsRequestProto) } static { defaultInstance = new GetFsReplicatedBlockStatsRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetFsReplicatedBlockStatsRequestProto) } public interface GetFsReplicatedBlockStatsResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required uint64 low_redundancy = 1; /** * required uint64 low_redundancy = 1; */ boolean hasLowRedundancy(); /** * required uint64 low_redundancy = 1; */ long getLowRedundancy(); // required uint64 corrupt_blocks = 2; /** * required uint64 corrupt_blocks = 2; */ boolean hasCorruptBlocks(); /** * required uint64 corrupt_blocks = 2; */ long getCorruptBlocks(); // required uint64 missing_blocks = 3; /** * required uint64 missing_blocks = 3; */ boolean hasMissingBlocks(); /** * required uint64 missing_blocks = 3; */ long getMissingBlocks(); // required uint64 missing_repl_one_blocks = 4; /** * required uint64 missing_repl_one_blocks = 4; */ boolean hasMissingReplOneBlocks(); /** * required uint64 missing_repl_one_blocks = 4; */ long getMissingReplOneBlocks(); // required uint64 blocks_in_future = 5; /** * required uint64 blocks_in_future = 5; */ boolean hasBlocksInFuture(); /** * required uint64 blocks_in_future = 5; */ long getBlocksInFuture(); // required uint64 pending_deletion_blocks = 6; /** * required uint64 pending_deletion_blocks = 6; */ boolean hasPendingDeletionBlocks(); /** * required uint64 pending_deletion_blocks = 6; */ long getPendingDeletionBlocks(); // optional uint64 highest_prio_low_redundancy_blocks = 7; /** * optional uint64 highest_prio_low_redundancy_blocks = 7; */ boolean hasHighestPrioLowRedundancyBlocks(); /** * optional uint64 highest_prio_low_redundancy_blocks = 7; */ long getHighestPrioLowRedundancyBlocks(); } /** * Protobuf type {@code hadoop.hdfs.GetFsReplicatedBlockStatsResponseProto} */ public static final class GetFsReplicatedBlockStatsResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetFsReplicatedBlockStatsResponseProtoOrBuilder { // Use GetFsReplicatedBlockStatsResponseProto.newBuilder() to construct. 
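  /*
   * A minimal usage sketch, assuming only the generated members of this
   * class (the zero values below are placeholders, not real statistics):
   * all six required uint64 fields must be set before build() succeeds,
   * because isInitialized() checks each of them.
   *
   *   GetFsReplicatedBlockStatsResponseProto stats =
   *       GetFsReplicatedBlockStatsResponseProto.newBuilder()
   *           .setLowRedundancy(0L)
   *           .setCorruptBlocks(0L)
   *           .setMissingBlocks(0L)
   *           .setMissingReplOneBlocks(0L)
   *           .setBlocksInFuture(0L)
   *           .setPendingDeletionBlocks(0L)
   *           .build();
   *   byte[] wire = stats.toByteArray();
   *   GetFsReplicatedBlockStatsResponseProto parsed =
   *       GetFsReplicatedBlockStatsResponseProto.parseFrom(wire);
   *
   * The same builder/parseFrom pattern applies to GetFsStatsResponseProto
   * above, whose nine uint64 fields include three optional ones
   * (missing_repl_one_blocks, blocks_in_future, pending_deletion_blocks).
   */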
private GetFsReplicatedBlockStatsResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetFsReplicatedBlockStatsResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetFsReplicatedBlockStatsResponseProto defaultInstance; public static GetFsReplicatedBlockStatsResponseProto getDefaultInstance() { return defaultInstance; } public GetFsReplicatedBlockStatsResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetFsReplicatedBlockStatsResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; lowRedundancy_ = input.readUInt64(); break; } case 16: { bitField0_ |= 0x00000002; corruptBlocks_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; missingBlocks_ = input.readUInt64(); break; } case 32: { bitField0_ |= 0x00000008; missingReplOneBlocks_ = input.readUInt64(); break; } case 40: { bitField0_ |= 0x00000010; blocksInFuture_ = input.readUInt64(); break; } case 48: { bitField0_ |= 0x00000020; pendingDeletionBlocks_ = input.readUInt64(); break; } case 56: { bitField0_ |= 0x00000040; highestPrioLowRedundancyBlocks_ = input.readUInt64(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsReplicatedBlockStatsResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsReplicatedBlockStatsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new 
io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetFsReplicatedBlockStatsResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetFsReplicatedBlockStatsResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required uint64 low_redundancy = 1; public static final int LOW_REDUNDANCY_FIELD_NUMBER = 1; private long lowRedundancy_; /** * required uint64 low_redundancy = 1; */ public boolean hasLowRedundancy() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 low_redundancy = 1; */ public long getLowRedundancy() { return lowRedundancy_; } // required uint64 corrupt_blocks = 2; public static final int CORRUPT_BLOCKS_FIELD_NUMBER = 2; private long corruptBlocks_; /** * required uint64 corrupt_blocks = 2; */ public boolean hasCorruptBlocks() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 corrupt_blocks = 2; */ public long getCorruptBlocks() { return corruptBlocks_; } // required uint64 missing_blocks = 3; public static final int MISSING_BLOCKS_FIELD_NUMBER = 3; private long missingBlocks_; /** * required uint64 missing_blocks = 3; */ public boolean hasMissingBlocks() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 missing_blocks = 3; */ public long getMissingBlocks() { return missingBlocks_; } // required uint64 missing_repl_one_blocks = 4; public static final int MISSING_REPL_ONE_BLOCKS_FIELD_NUMBER = 4; private long missingReplOneBlocks_; /** * required uint64 missing_repl_one_blocks = 4; */ public boolean hasMissingReplOneBlocks() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required uint64 missing_repl_one_blocks = 4; */ public long getMissingReplOneBlocks() { return missingReplOneBlocks_; } // required uint64 blocks_in_future = 5; public static final int BLOCKS_IN_FUTURE_FIELD_NUMBER = 5; private long blocksInFuture_; /** * required uint64 blocks_in_future = 5; */ public boolean hasBlocksInFuture() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required uint64 blocks_in_future = 5; */ public long getBlocksInFuture() { return blocksInFuture_; } // required uint64 pending_deletion_blocks = 6; public static final int PENDING_DELETION_BLOCKS_FIELD_NUMBER = 6; private long pendingDeletionBlocks_; /** * required uint64 pending_deletion_blocks = 6; */ public boolean hasPendingDeletionBlocks() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * required uint64 pending_deletion_blocks = 6; */ public long getPendingDeletionBlocks() { return pendingDeletionBlocks_; } // optional uint64 highest_prio_low_redundancy_blocks = 7; public static final int HIGHEST_PRIO_LOW_REDUNDANCY_BLOCKS_FIELD_NUMBER = 7; private long highestPrioLowRedundancyBlocks_; /** * optional uint64 highest_prio_low_redundancy_blocks = 7; */ public boolean hasHighestPrioLowRedundancyBlocks() { return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional uint64 highest_prio_low_redundancy_blocks = 7; */ public long getHighestPrioLowRedundancyBlocks() { return highestPrioLowRedundancyBlocks_; } private void initFields() { lowRedundancy_ = 0L; corruptBlocks_ = 0L; missingBlocks_ = 0L; missingReplOneBlocks_ = 0L; 
blocksInFuture_ = 0L; pendingDeletionBlocks_ = 0L; highestPrioLowRedundancyBlocks_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasLowRedundancy()) { memoizedIsInitialized = 0; return false; } if (!hasCorruptBlocks()) { memoizedIsInitialized = 0; return false; } if (!hasMissingBlocks()) { memoizedIsInitialized = 0; return false; } if (!hasMissingReplOneBlocks()) { memoizedIsInitialized = 0; return false; } if (!hasBlocksInFuture()) { memoizedIsInitialized = 0; return false; } if (!hasPendingDeletionBlocks()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(1, lowRedundancy_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, corruptBlocks_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(3, missingBlocks_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeUInt64(4, missingReplOneBlocks_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeUInt64(5, blocksInFuture_); } if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeUInt64(6, pendingDeletionBlocks_); } if (((bitField0_ & 0x00000040) == 0x00000040)) { output.writeUInt64(7, highestPrioLowRedundancyBlocks_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(1, lowRedundancy_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(2, corruptBlocks_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(3, missingBlocks_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(4, missingReplOneBlocks_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(5, blocksInFuture_); } if (((bitField0_ & 0x00000020) == 0x00000020)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(6, pendingDeletionBlocks_); } if (((bitField0_ & 0x00000040) == 0x00000040)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(7, highestPrioLowRedundancyBlocks_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto)) { return super.equals(obj); } 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto) obj; boolean result = true; result = result && (hasLowRedundancy() == other.hasLowRedundancy()); if (hasLowRedundancy()) { result = result && (getLowRedundancy() == other.getLowRedundancy()); } result = result && (hasCorruptBlocks() == other.hasCorruptBlocks()); if (hasCorruptBlocks()) { result = result && (getCorruptBlocks() == other.getCorruptBlocks()); } result = result && (hasMissingBlocks() == other.hasMissingBlocks()); if (hasMissingBlocks()) { result = result && (getMissingBlocks() == other.getMissingBlocks()); } result = result && (hasMissingReplOneBlocks() == other.hasMissingReplOneBlocks()); if (hasMissingReplOneBlocks()) { result = result && (getMissingReplOneBlocks() == other.getMissingReplOneBlocks()); } result = result && (hasBlocksInFuture() == other.hasBlocksInFuture()); if (hasBlocksInFuture()) { result = result && (getBlocksInFuture() == other.getBlocksInFuture()); } result = result && (hasPendingDeletionBlocks() == other.hasPendingDeletionBlocks()); if (hasPendingDeletionBlocks()) { result = result && (getPendingDeletionBlocks() == other.getPendingDeletionBlocks()); } result = result && (hasHighestPrioLowRedundancyBlocks() == other.hasHighestPrioLowRedundancyBlocks()); if (hasHighestPrioLowRedundancyBlocks()) { result = result && (getHighestPrioLowRedundancyBlocks() == other.getHighestPrioLowRedundancyBlocks()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasLowRedundancy()) { hash = (37 * hash) + LOW_REDUNDANCY_FIELD_NUMBER; hash = (53 * hash) + hashLong(getLowRedundancy()); } if (hasCorruptBlocks()) { hash = (37 * hash) + CORRUPT_BLOCKS_FIELD_NUMBER; hash = (53 * hash) + hashLong(getCorruptBlocks()); } if (hasMissingBlocks()) { hash = (37 * hash) + MISSING_BLOCKS_FIELD_NUMBER; hash = (53 * hash) + hashLong(getMissingBlocks()); } if (hasMissingReplOneBlocks()) { hash = (37 * hash) + MISSING_REPL_ONE_BLOCKS_FIELD_NUMBER; hash = (53 * hash) + hashLong(getMissingReplOneBlocks()); } if (hasBlocksInFuture()) { hash = (37 * hash) + BLOCKS_IN_FUTURE_FIELD_NUMBER; hash = (53 * hash) + hashLong(getBlocksInFuture()); } if (hasPendingDeletionBlocks()) { hash = (37 * hash) + PENDING_DELETION_BLOCKS_FIELD_NUMBER; hash = (53 * hash) + hashLong(getPendingDeletionBlocks()); } if (hasHighestPrioLowRedundancyBlocks()) { hash = (37 * hash) + HIGHEST_PRIO_LOW_REDUNDANCY_BLOCKS_FIELD_NUMBER; hash = (53 * hash) + hashLong(getHighestPrioLowRedundancyBlocks()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetFsReplicatedBlockStatsResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsReplicatedBlockStatsResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsReplicatedBlockStatsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); lowRedundancy_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); corruptBlocks_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); missingBlocks_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); missingReplOneBlocks_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); blocksInFuture_ = 0L; bitField0_ = (bitField0_ & ~0x00000010); pendingDeletionBlocks_ = 0L; bitField0_ = (bitField0_ & ~0x00000020); highestPrioLowRedundancyBlocks_ = 0L; bitField0_ = (bitField0_ & ~0x00000040); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsReplicatedBlockStatsResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.lowRedundancy_ = lowRedundancy_; if 
(((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.corruptBlocks_ = corruptBlocks_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.missingBlocks_ = missingBlocks_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.missingReplOneBlocks_ = missingReplOneBlocks_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } result.blocksInFuture_ = blocksInFuture_; if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000020; } result.pendingDeletionBlocks_ = pendingDeletionBlocks_; if (((from_bitField0_ & 0x00000040) == 0x00000040)) { to_bitField0_ |= 0x00000040; } result.highestPrioLowRedundancyBlocks_ = highestPrioLowRedundancyBlocks_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto.getDefaultInstance()) return this; if (other.hasLowRedundancy()) { setLowRedundancy(other.getLowRedundancy()); } if (other.hasCorruptBlocks()) { setCorruptBlocks(other.getCorruptBlocks()); } if (other.hasMissingBlocks()) { setMissingBlocks(other.getMissingBlocks()); } if (other.hasMissingReplOneBlocks()) { setMissingReplOneBlocks(other.getMissingReplOneBlocks()); } if (other.hasBlocksInFuture()) { setBlocksInFuture(other.getBlocksInFuture()); } if (other.hasPendingDeletionBlocks()) { setPendingDeletionBlocks(other.getPendingDeletionBlocks()); } if (other.hasHighestPrioLowRedundancyBlocks()) { setHighestPrioLowRedundancyBlocks(other.getHighestPrioLowRedundancyBlocks()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasLowRedundancy()) { return false; } if (!hasCorruptBlocks()) { return false; } if (!hasMissingBlocks()) { return false; } if (!hasMissingReplOneBlocks()) { return false; } if (!hasBlocksInFuture()) { return false; } if (!hasPendingDeletionBlocks()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required uint64 low_redundancy = 1; private long lowRedundancy_ ; /** * required uint64 low_redundancy = 1; */ public boolean hasLowRedundancy() { return 
((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 low_redundancy = 1; */ public long getLowRedundancy() { return lowRedundancy_; } /** * required uint64 low_redundancy = 1; */ public Builder setLowRedundancy(long value) { bitField0_ |= 0x00000001; lowRedundancy_ = value; onChanged(); return this; } /** * required uint64 low_redundancy = 1; */ public Builder clearLowRedundancy() { bitField0_ = (bitField0_ & ~0x00000001); lowRedundancy_ = 0L; onChanged(); return this; } // required uint64 corrupt_blocks = 2; private long corruptBlocks_ ; /** * required uint64 corrupt_blocks = 2; */ public boolean hasCorruptBlocks() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 corrupt_blocks = 2; */ public long getCorruptBlocks() { return corruptBlocks_; } /** * required uint64 corrupt_blocks = 2; */ public Builder setCorruptBlocks(long value) { bitField0_ |= 0x00000002; corruptBlocks_ = value; onChanged(); return this; } /** * required uint64 corrupt_blocks = 2; */ public Builder clearCorruptBlocks() { bitField0_ = (bitField0_ & ~0x00000002); corruptBlocks_ = 0L; onChanged(); return this; } // required uint64 missing_blocks = 3; private long missingBlocks_ ; /** * required uint64 missing_blocks = 3; */ public boolean hasMissingBlocks() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 missing_blocks = 3; */ public long getMissingBlocks() { return missingBlocks_; } /** * required uint64 missing_blocks = 3; */ public Builder setMissingBlocks(long value) { bitField0_ |= 0x00000004; missingBlocks_ = value; onChanged(); return this; } /** * required uint64 missing_blocks = 3; */ public Builder clearMissingBlocks() { bitField0_ = (bitField0_ & ~0x00000004); missingBlocks_ = 0L; onChanged(); return this; } // required uint64 missing_repl_one_blocks = 4; private long missingReplOneBlocks_ ; /** * required uint64 missing_repl_one_blocks = 4; */ public boolean hasMissingReplOneBlocks() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required uint64 missing_repl_one_blocks = 4; */ public long getMissingReplOneBlocks() { return missingReplOneBlocks_; } /** * required uint64 missing_repl_one_blocks = 4; */ public Builder setMissingReplOneBlocks(long value) { bitField0_ |= 0x00000008; missingReplOneBlocks_ = value; onChanged(); return this; } /** * required uint64 missing_repl_one_blocks = 4; */ public Builder clearMissingReplOneBlocks() { bitField0_ = (bitField0_ & ~0x00000008); missingReplOneBlocks_ = 0L; onChanged(); return this; } // required uint64 blocks_in_future = 5; private long blocksInFuture_ ; /** * required uint64 blocks_in_future = 5; */ public boolean hasBlocksInFuture() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required uint64 blocks_in_future = 5; */ public long getBlocksInFuture() { return blocksInFuture_; } /** * required uint64 blocks_in_future = 5; */ public Builder setBlocksInFuture(long value) { bitField0_ |= 0x00000010; blocksInFuture_ = value; onChanged(); return this; } /** * required uint64 blocks_in_future = 5; */ public Builder clearBlocksInFuture() { bitField0_ = (bitField0_ & ~0x00000010); blocksInFuture_ = 0L; onChanged(); return this; } // required uint64 pending_deletion_blocks = 6; private long pendingDeletionBlocks_ ; /** * required uint64 pending_deletion_blocks = 6; */ public boolean hasPendingDeletionBlocks() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * required uint64 pending_deletion_blocks = 6; */ public long getPendingDeletionBlocks() { return 
pendingDeletionBlocks_; } /** * required uint64 pending_deletion_blocks = 6; */ public Builder setPendingDeletionBlocks(long value) { bitField0_ |= 0x00000020; pendingDeletionBlocks_ = value; onChanged(); return this; } /** * required uint64 pending_deletion_blocks = 6; */ public Builder clearPendingDeletionBlocks() { bitField0_ = (bitField0_ & ~0x00000020); pendingDeletionBlocks_ = 0L; onChanged(); return this; } // optional uint64 highest_prio_low_redundancy_blocks = 7; private long highestPrioLowRedundancyBlocks_ ; /** * optional uint64 highest_prio_low_redundancy_blocks = 7; */ public boolean hasHighestPrioLowRedundancyBlocks() { return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional uint64 highest_prio_low_redundancy_blocks = 7; */ public long getHighestPrioLowRedundancyBlocks() { return highestPrioLowRedundancyBlocks_; } /** * optional uint64 highest_prio_low_redundancy_blocks = 7; */ public Builder setHighestPrioLowRedundancyBlocks(long value) { bitField0_ |= 0x00000040; highestPrioLowRedundancyBlocks_ = value; onChanged(); return this; } /** * optional uint64 highest_prio_low_redundancy_blocks = 7; */ public Builder clearHighestPrioLowRedundancyBlocks() { bitField0_ = (bitField0_ & ~0x00000040); highestPrioLowRedundancyBlocks_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetFsReplicatedBlockStatsResponseProto) } static { defaultInstance = new GetFsReplicatedBlockStatsResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetFsReplicatedBlockStatsResponseProto) } public interface GetFsECBlockGroupStatsRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.GetFsECBlockGroupStatsRequestProto} * *
   * no input parameters
   * 
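   *
   * A minimal usage sketch (variable names are illustrative): since this
   * request message carries no fields, either of the following yields a
   * valid instance; getDefaultInstance() avoids a fresh allocation.
   *
   *   GetFsECBlockGroupStatsRequestProto req =
   *       GetFsECBlockGroupStatsRequestProto.newBuilder().build();
   *   GetFsECBlockGroupStatsRequestProto shared =
   *       GetFsECBlockGroupStatsRequestProto.getDefaultInstance();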
*/ public static final class GetFsECBlockGroupStatsRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetFsECBlockGroupStatsRequestProtoOrBuilder { // Use GetFsECBlockGroupStatsRequestProto.newBuilder() to construct. private GetFsECBlockGroupStatsRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetFsECBlockGroupStatsRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetFsECBlockGroupStatsRequestProto defaultInstance; public static GetFsECBlockGroupStatsRequestProto getDefaultInstance() { return defaultInstance; } public GetFsECBlockGroupStatsRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetFsECBlockGroupStatsRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsECBlockGroupStatsRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsECBlockGroupStatsRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetFsECBlockGroupStatsRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new 
GetFsECBlockGroupStatsRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetFsECBlockGroupStatsRequestProto} * *
     * no input parameters
     * 
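     *
     * A delimited-stream round-trip sketch (the stream variables out and in
     * are assumed to be a matching java.io.OutputStream/InputStream pair):
     * even this empty message can be length-prefixed on the wire and read
     * back with the generated parseDelimitedFrom helper.
     *
     *   GetFsECBlockGroupStatsRequestProto req =
     *       GetFsECBlockGroupStatsRequestProto.newBuilder().build();
     *   req.writeDelimitedTo(out);
     *   GetFsECBlockGroupStatsRequestProto back =
     *       GetFsECBlockGroupStatsRequestProto.parseDelimitedFrom(in);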
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsECBlockGroupStatsRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsECBlockGroupStatsRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsECBlockGroupStatsRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto other) { if (other == 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetFsECBlockGroupStatsRequestProto) } static { defaultInstance = new GetFsECBlockGroupStatsRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetFsECBlockGroupStatsRequestProto) } public interface GetFsECBlockGroupStatsResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required uint64 low_redundancy = 1; /** * required uint64 low_redundancy = 1; */ boolean hasLowRedundancy(); /** * required uint64 low_redundancy = 1; */ long getLowRedundancy(); // required uint64 corrupt_blocks = 2; /** * required uint64 corrupt_blocks = 2; */ boolean hasCorruptBlocks(); /** * required uint64 corrupt_blocks = 2; */ long getCorruptBlocks(); // required uint64 missing_blocks = 3; /** * required uint64 missing_blocks = 3; */ boolean hasMissingBlocks(); /** * required uint64 missing_blocks = 3; */ long getMissingBlocks(); // required uint64 blocks_in_future = 4; /** * required uint64 blocks_in_future = 4; */ boolean hasBlocksInFuture(); /** * required uint64 blocks_in_future = 4; */ long getBlocksInFuture(); // required uint64 pending_deletion_blocks = 5; /** * required uint64 pending_deletion_blocks = 5; */ boolean hasPendingDeletionBlocks(); /** * required uint64 pending_deletion_blocks = 5; */ long getPendingDeletionBlocks(); // optional uint64 highest_prio_low_redundancy_blocks = 6; /** * optional uint64 highest_prio_low_redundancy_blocks = 6; */ boolean hasHighestPrioLowRedundancyBlocks(); /** * optional uint64 highest_prio_low_redundancy_blocks = 6; */ long getHighestPrioLowRedundancyBlocks(); } /** * Protobuf type {@code hadoop.hdfs.GetFsECBlockGroupStatsResponseProto} */ public static final class GetFsECBlockGroupStatsResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetFsECBlockGroupStatsResponseProtoOrBuilder { // Use GetFsECBlockGroupStatsResponseProto.newBuilder() to construct. 
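    /*
     * A minimal construction sketch (the counter values below are
     * illustrative): fields 1-5 are declared required, so build() throws an
     * UninitializedMessageException until each of them has been set, while
     * highest_prio_low_redundancy_blocks (field 6) is optional and may be
     * left unset.
     *
     *   GetFsECBlockGroupStatsResponseProto stats =
     *       GetFsECBlockGroupStatsResponseProto.newBuilder()
     *           .setLowRedundancy(3L)
     *           .setCorruptBlocks(0L)
     *           .setMissingBlocks(1L)
     *           .setBlocksInFuture(0L)
     *           .setPendingDeletionBlocks(2L)
     *           .setHighestPrioLowRedundancyBlocks(1L) // optional
     *           .build();
     */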
private GetFsECBlockGroupStatsResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetFsECBlockGroupStatsResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetFsECBlockGroupStatsResponseProto defaultInstance; public static GetFsECBlockGroupStatsResponseProto getDefaultInstance() { return defaultInstance; } public GetFsECBlockGroupStatsResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetFsECBlockGroupStatsResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; lowRedundancy_ = input.readUInt64(); break; } case 16: { bitField0_ |= 0x00000002; corruptBlocks_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; missingBlocks_ = input.readUInt64(); break; } case 32: { bitField0_ |= 0x00000008; blocksInFuture_ = input.readUInt64(); break; } case 40: { bitField0_ |= 0x00000010; pendingDeletionBlocks_ = input.readUInt64(); break; } case 48: { bitField0_ |= 0x00000020; highestPrioLowRedundancyBlocks_ = input.readUInt64(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsECBlockGroupStatsResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsECBlockGroupStatsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetFsECBlockGroupStatsResponseProto 
parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetFsECBlockGroupStatsResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required uint64 low_redundancy = 1; public static final int LOW_REDUNDANCY_FIELD_NUMBER = 1; private long lowRedundancy_; /** * required uint64 low_redundancy = 1; */ public boolean hasLowRedundancy() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 low_redundancy = 1; */ public long getLowRedundancy() { return lowRedundancy_; } // required uint64 corrupt_blocks = 2; public static final int CORRUPT_BLOCKS_FIELD_NUMBER = 2; private long corruptBlocks_; /** * required uint64 corrupt_blocks = 2; */ public boolean hasCorruptBlocks() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 corrupt_blocks = 2; */ public long getCorruptBlocks() { return corruptBlocks_; } // required uint64 missing_blocks = 3; public static final int MISSING_BLOCKS_FIELD_NUMBER = 3; private long missingBlocks_; /** * required uint64 missing_blocks = 3; */ public boolean hasMissingBlocks() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 missing_blocks = 3; */ public long getMissingBlocks() { return missingBlocks_; } // required uint64 blocks_in_future = 4; public static final int BLOCKS_IN_FUTURE_FIELD_NUMBER = 4; private long blocksInFuture_; /** * required uint64 blocks_in_future = 4; */ public boolean hasBlocksInFuture() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required uint64 blocks_in_future = 4; */ public long getBlocksInFuture() { return blocksInFuture_; } // required uint64 pending_deletion_blocks = 5; public static final int PENDING_DELETION_BLOCKS_FIELD_NUMBER = 5; private long pendingDeletionBlocks_; /** * required uint64 pending_deletion_blocks = 5; */ public boolean hasPendingDeletionBlocks() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required uint64 pending_deletion_blocks = 5; */ public long getPendingDeletionBlocks() { return pendingDeletionBlocks_; } // optional uint64 highest_prio_low_redundancy_blocks = 6; public static final int HIGHEST_PRIO_LOW_REDUNDANCY_BLOCKS_FIELD_NUMBER = 6; private long highestPrioLowRedundancyBlocks_; /** * optional uint64 highest_prio_low_redundancy_blocks = 6; */ public boolean hasHighestPrioLowRedundancyBlocks() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional uint64 highest_prio_low_redundancy_blocks = 6; */ public long getHighestPrioLowRedundancyBlocks() { return highestPrioLowRedundancyBlocks_; } private void initFields() { lowRedundancy_ = 0L; corruptBlocks_ = 0L; missingBlocks_ = 0L; blocksInFuture_ = 0L; pendingDeletionBlocks_ = 0L; highestPrioLowRedundancyBlocks_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasLowRedundancy()) { memoizedIsInitialized = 0; return false; } if (!hasCorruptBlocks()) { memoizedIsInitialized = 0; return false; } if (!hasMissingBlocks()) { memoizedIsInitialized = 0; return false; } if (!hasBlocksInFuture()) { memoizedIsInitialized = 0; return false; } if 
(!hasPendingDeletionBlocks()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(1, lowRedundancy_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, corruptBlocks_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(3, missingBlocks_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeUInt64(4, blocksInFuture_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeUInt64(5, pendingDeletionBlocks_); } if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeUInt64(6, highestPrioLowRedundancyBlocks_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(1, lowRedundancy_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(2, corruptBlocks_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(3, missingBlocks_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(4, blocksInFuture_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(5, pendingDeletionBlocks_); } if (((bitField0_ & 0x00000020) == 0x00000020)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(6, highestPrioLowRedundancyBlocks_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto) obj; boolean result = true; result = result && (hasLowRedundancy() == other.hasLowRedundancy()); if (hasLowRedundancy()) { result = result && (getLowRedundancy() == other.getLowRedundancy()); } result = result && (hasCorruptBlocks() == other.hasCorruptBlocks()); if (hasCorruptBlocks()) { result = result && (getCorruptBlocks() == other.getCorruptBlocks()); } result = result && (hasMissingBlocks() == other.hasMissingBlocks()); if (hasMissingBlocks()) { result = result && (getMissingBlocks() == other.getMissingBlocks()); } result = result && (hasBlocksInFuture() == other.hasBlocksInFuture()); if (hasBlocksInFuture()) { result = result && (getBlocksInFuture() == other.getBlocksInFuture()); } result = result && (hasPendingDeletionBlocks() == 
other.hasPendingDeletionBlocks()); if (hasPendingDeletionBlocks()) { result = result && (getPendingDeletionBlocks() == other.getPendingDeletionBlocks()); } result = result && (hasHighestPrioLowRedundancyBlocks() == other.hasHighestPrioLowRedundancyBlocks()); if (hasHighestPrioLowRedundancyBlocks()) { result = result && (getHighestPrioLowRedundancyBlocks() == other.getHighestPrioLowRedundancyBlocks()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasLowRedundancy()) { hash = (37 * hash) + LOW_REDUNDANCY_FIELD_NUMBER; hash = (53 * hash) + hashLong(getLowRedundancy()); } if (hasCorruptBlocks()) { hash = (37 * hash) + CORRUPT_BLOCKS_FIELD_NUMBER; hash = (53 * hash) + hashLong(getCorruptBlocks()); } if (hasMissingBlocks()) { hash = (37 * hash) + MISSING_BLOCKS_FIELD_NUMBER; hash = (53 * hash) + hashLong(getMissingBlocks()); } if (hasBlocksInFuture()) { hash = (37 * hash) + BLOCKS_IN_FUTURE_FIELD_NUMBER; hash = (53 * hash) + hashLong(getBlocksInFuture()); } if (hasPendingDeletionBlocks()) { hash = (37 * hash) + PENDING_DELETION_BLOCKS_FIELD_NUMBER; hash = (53 * hash) + hashLong(getPendingDeletionBlocks()); } if (hasHighestPrioLowRedundancyBlocks()) { hash = (37 * hash) + HIGHEST_PRIO_LOW_REDUNDANCY_BLOCKS_FIELD_NUMBER; hash = (53 * hash) + hashLong(getHighestPrioLowRedundancyBlocks()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return 
PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetFsECBlockGroupStatsResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsECBlockGroupStatsResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsECBlockGroupStatsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public 
Builder clear() { super.clear(); lowRedundancy_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); corruptBlocks_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); missingBlocks_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); blocksInFuture_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); pendingDeletionBlocks_ = 0L; bitField0_ = (bitField0_ & ~0x00000010); highestPrioLowRedundancyBlocks_ = 0L; bitField0_ = (bitField0_ & ~0x00000020); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFsECBlockGroupStatsResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.lowRedundancy_ = lowRedundancy_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.corruptBlocks_ = corruptBlocks_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.missingBlocks_ = missingBlocks_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.blocksInFuture_ = blocksInFuture_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } result.pendingDeletionBlocks_ = pendingDeletionBlocks_; if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000020; } result.highestPrioLowRedundancyBlocks_ = highestPrioLowRedundancyBlocks_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto.getDefaultInstance()) return this; if (other.hasLowRedundancy()) { setLowRedundancy(other.getLowRedundancy()); } if (other.hasCorruptBlocks()) { setCorruptBlocks(other.getCorruptBlocks()); } if (other.hasMissingBlocks()) { 
setMissingBlocks(other.getMissingBlocks()); } if (other.hasBlocksInFuture()) { setBlocksInFuture(other.getBlocksInFuture()); } if (other.hasPendingDeletionBlocks()) { setPendingDeletionBlocks(other.getPendingDeletionBlocks()); } if (other.hasHighestPrioLowRedundancyBlocks()) { setHighestPrioLowRedundancyBlocks(other.getHighestPrioLowRedundancyBlocks()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasLowRedundancy()) { return false; } if (!hasCorruptBlocks()) { return false; } if (!hasMissingBlocks()) { return false; } if (!hasBlocksInFuture()) { return false; } if (!hasPendingDeletionBlocks()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required uint64 low_redundancy = 1; private long lowRedundancy_ ; /** * required uint64 low_redundancy = 1; */ public boolean hasLowRedundancy() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 low_redundancy = 1; */ public long getLowRedundancy() { return lowRedundancy_; } /** * required uint64 low_redundancy = 1; */ public Builder setLowRedundancy(long value) { bitField0_ |= 0x00000001; lowRedundancy_ = value; onChanged(); return this; } /** * required uint64 low_redundancy = 1; */ public Builder clearLowRedundancy() { bitField0_ = (bitField0_ & ~0x00000001); lowRedundancy_ = 0L; onChanged(); return this; } // required uint64 corrupt_blocks = 2; private long corruptBlocks_ ; /** * required uint64 corrupt_blocks = 2; */ public boolean hasCorruptBlocks() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 corrupt_blocks = 2; */ public long getCorruptBlocks() { return corruptBlocks_; } /** * required uint64 corrupt_blocks = 2; */ public Builder setCorruptBlocks(long value) { bitField0_ |= 0x00000002; corruptBlocks_ = value; onChanged(); return this; } /** * required uint64 corrupt_blocks = 2; */ public Builder clearCorruptBlocks() { bitField0_ = (bitField0_ & ~0x00000002); corruptBlocks_ = 0L; onChanged(); return this; } // required uint64 missing_blocks = 3; private long missingBlocks_ ; /** * required uint64 missing_blocks = 3; */ public boolean hasMissingBlocks() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 missing_blocks = 3; */ public long getMissingBlocks() { return missingBlocks_; } /** * required uint64 missing_blocks = 3; */ public Builder setMissingBlocks(long value) { bitField0_ |= 0x00000004; missingBlocks_ = value; onChanged(); return this; } /** * required uint64 missing_blocks = 3; */ public Builder clearMissingBlocks() { bitField0_ = (bitField0_ & ~0x00000004); missingBlocks_ = 0L; onChanged(); return this; } // required uint64 blocks_in_future = 4; private long blocksInFuture_ ; /** * required uint64 blocks_in_future = 4; */ public 
boolean hasBlocksInFuture() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required uint64 blocks_in_future = 4; */ public long getBlocksInFuture() { return blocksInFuture_; } /** * required uint64 blocks_in_future = 4; */ public Builder setBlocksInFuture(long value) { bitField0_ |= 0x00000008; blocksInFuture_ = value; onChanged(); return this; } /** * required uint64 blocks_in_future = 4; */ public Builder clearBlocksInFuture() { bitField0_ = (bitField0_ & ~0x00000008); blocksInFuture_ = 0L; onChanged(); return this; } // required uint64 pending_deletion_blocks = 5; private long pendingDeletionBlocks_ ; /** * required uint64 pending_deletion_blocks = 5; */ public boolean hasPendingDeletionBlocks() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required uint64 pending_deletion_blocks = 5; */ public long getPendingDeletionBlocks() { return pendingDeletionBlocks_; } /** * required uint64 pending_deletion_blocks = 5; */ public Builder setPendingDeletionBlocks(long value) { bitField0_ |= 0x00000010; pendingDeletionBlocks_ = value; onChanged(); return this; } /** * required uint64 pending_deletion_blocks = 5; */ public Builder clearPendingDeletionBlocks() { bitField0_ = (bitField0_ & ~0x00000010); pendingDeletionBlocks_ = 0L; onChanged(); return this; } // optional uint64 highest_prio_low_redundancy_blocks = 6; private long highestPrioLowRedundancyBlocks_ ; /** * optional uint64 highest_prio_low_redundancy_blocks = 6; */ public boolean hasHighestPrioLowRedundancyBlocks() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional uint64 highest_prio_low_redundancy_blocks = 6; */ public long getHighestPrioLowRedundancyBlocks() { return highestPrioLowRedundancyBlocks_; } /** * optional uint64 highest_prio_low_redundancy_blocks = 6; */ public Builder setHighestPrioLowRedundancyBlocks(long value) { bitField0_ |= 0x00000020; highestPrioLowRedundancyBlocks_ = value; onChanged(); return this; } /** * optional uint64 highest_prio_low_redundancy_blocks = 6; */ public Builder clearHighestPrioLowRedundancyBlocks() { bitField0_ = (bitField0_ & ~0x00000020); highestPrioLowRedundancyBlocks_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetFsECBlockGroupStatsResponseProto) } static { defaultInstance = new GetFsECBlockGroupStatsResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetFsECBlockGroupStatsResponseProto) } public interface GetDatanodeReportRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.DatanodeReportTypeProto type = 1; /** * required .hadoop.hdfs.DatanodeReportTypeProto type = 1; */ boolean hasType(); /** * required .hadoop.hdfs.DatanodeReportTypeProto type = 1; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto getType(); } /** * Protobuf type {@code hadoop.hdfs.GetDatanodeReportRequestProto} */ public static final class GetDatanodeReportRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetDatanodeReportRequestProtoOrBuilder { // Use GetDatanodeReportRequestProto.newBuilder() to construct. 
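    /*
     * A serialization round-trip sketch (values illustrative; setType is the
     * builder's generated setter for the lone required field, which defaults
     * to DatanodeReportTypeProto.ALL per initFields() below):
     *
     *   GetDatanodeReportRequestProto req =
     *       GetDatanodeReportRequestProto.newBuilder()
     *           .setType(DatanodeReportTypeProto.ALL)
     *           .build();
     *   GetDatanodeReportRequestProto copy =
     *       GetDatanodeReportRequestProto.parseFrom(req.toByteArray());
     *   // copy.getType() == DatanodeReportTypeProto.ALL
     */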
private GetDatanodeReportRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetDatanodeReportRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetDatanodeReportRequestProto defaultInstance; public static GetDatanodeReportRequestProto getDefaultInstance() { return defaultInstance; } public GetDatanodeReportRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetDatanodeReportRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto value = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { bitField0_ |= 0x00000001; type_ = value; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeReportRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeReportRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetDatanodeReportRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetDatanodeReportRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.DatanodeReportTypeProto type = 1; public static final int TYPE_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto type_; /** * required .hadoop.hdfs.DatanodeReportTypeProto type = 1; */ public boolean hasType() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.DatanodeReportTypeProto type = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto getType() { return type_; } private void initFields() { type_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto.ALL; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasType()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeEnum(1, type_.getNumber()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeEnumSize(1, type_.getNumber()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto) obj; boolean result = true; result = result && (hasType() == other.hasType()); if (hasType()) { result = result && (getType() == other.getType()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasType()) { hash = (37 * hash) + TYPE_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getType()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetDatanodeReportRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeReportRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeReportRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); type_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto.ALL; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeReportRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.type_ = type_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto.getDefaultInstance()) return this; if (other.hasType()) { setType(other.getType()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasType()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.DatanodeReportTypeProto type = 1; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto type_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto.ALL; /** * required .hadoop.hdfs.DatanodeReportTypeProto type = 1; */ public boolean hasType() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.DatanodeReportTypeProto type = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto getType() { return type_; } /** * required .hadoop.hdfs.DatanodeReportTypeProto type = 1; */ public Builder setType(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; type_ = value; onChanged(); return this; } /** * required .hadoop.hdfs.DatanodeReportTypeProto type = 1; */ public Builder clearType() { bitField0_ = (bitField0_ & ~0x00000001); type_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto.ALL; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetDatanodeReportRequestProto) } static { defaultInstance = new GetDatanodeReportRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetDatanodeReportRequestProto) } public interface GetDatanodeReportResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // repeated .hadoop.hdfs.DatanodeInfoProto di = 1; /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ java.util.List getDiList(); /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDi(int index); /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ int getDiCount(); /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ java.util.List getDiOrBuilderList(); /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDiOrBuilder( int index); } /** * Protobuf type {@code hadoop.hdfs.GetDatanodeReportResponseProto} */ public static final class 
GetDatanodeReportResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetDatanodeReportResponseProtoOrBuilder { // Use GetDatanodeReportResponseProto.newBuilder() to construct. private GetDatanodeReportResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetDatanodeReportResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetDatanodeReportResponseProto defaultInstance; public static GetDatanodeReportResponseProto getDefaultInstance() { return defaultInstance; } public GetDatanodeReportResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetDatanodeReportResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { di_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } di_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry)); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { di_ = java.util.Collections.unmodifiableList(di_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeReportResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeReportResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public 
GetDatanodeReportResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetDatanodeReportResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<GetDatanodeReportResponseProto> getParserForType() { return PARSER; } // repeated .hadoop.hdfs.DatanodeInfoProto di = 1; public static final int DI_FIELD_NUMBER = 1; private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> di_; /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getDiList() { return di_; } /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> getDiOrBuilderList() { return di_; } /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ public int getDiCount() { return di_.size(); } /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDi(int index) { return di_.get(index); } /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDiOrBuilder( int index) { return di_.get(index); } private void initFields() { di_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; for (int i = 0; i < getDiCount(); i++) { if (!getDi(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); for (int i = 0; i < di_.size(); i++) { output.writeMessage(1, di_.get(i)); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; for (int i = 0; i < di_.size(); i++) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, di_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto) obj; boolean result = true; result = result && getDiList() .equals(other.getDiList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (getDiCount() > 0) { hash = (37 * hash) + DI_FIELD_NUMBER; hash = (53 * hash) + getDiList().hashCode(); } hash = (29 * hash) + 
getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } 
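// Sketch of consuming a response (not emitted by protoc; 'in' is a hypothetical
// java.io.InputStream holding one length-delimited message). The repeated 'di'
// field is exposed as an immutable list:
//
//   GetDatanodeReportResponseProto resp =
//       GetDatanodeReportResponseProto.parseDelimitedFrom(in);
//   for (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto dn
//       : resp.getDiList()) {
//     // each element describes one datanode in the requested report
//   }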
@java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetDatanodeReportResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeReportResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeReportResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getDiFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (diBuilder_ == null) { di_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { diBuilder_.clear(); } return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeReportResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto(this); int from_bitField0_ = bitField0_; if (diBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { di_ = java.util.Collections.unmodifiableList(di_); bitField0_ = (bitField0_ & ~0x00000001); } result.di_ 
= di_; } else { result.di_ = diBuilder_.build(); } onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.getDefaultInstance()) return this; if (diBuilder_ == null) { if (!other.di_.isEmpty()) { if (di_.isEmpty()) { di_ = other.di_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureDiIsMutable(); di_.addAll(other.di_); } onChanged(); } } else { if (!other.di_.isEmpty()) { if (diBuilder_.isEmpty()) { diBuilder_.dispose(); diBuilder_ = null; di_ = other.di_; bitField0_ = (bitField0_ & ~0x00000001); diBuilder_ = io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getDiFieldBuilder() : null; } else { diBuilder_.addAllMessages(other.di_); } } } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { for (int i = 0; i < getDiCount(); i++) { if (!getDi(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // repeated .hadoop.hdfs.DatanodeInfoProto di = 1; private java.util.List di_ = java.util.Collections.emptyList(); private void ensureDiIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { di_ = new java.util.ArrayList(di_); bitField0_ |= 0x00000001; } } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> diBuilder_; /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ public java.util.List getDiList() { if (diBuilder_ == null) { return java.util.Collections.unmodifiableList(di_); } else { return diBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ public int getDiCount() { if (diBuilder_ == null) { return di_.size(); } else { return diBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDi(int index) { if (diBuilder_ == null) { return di_.get(index); } else { return diBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ 
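// Sketch of populating the repeated field through the accessors declared below
// (not emitted by protoc; 'info' is a hypothetical pre-built DatanodeInfoProto):
//
//   GetDatanodeReportResponseProto resp = GetDatanodeReportResponseProto.newBuilder()
//       .addDi(info)                                      // append one element
//       .addAllDi(java.util.Arrays.asList(info, info))    // append a collection
//       .build();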
public Builder setDi( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (diBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDiIsMutable(); di_.set(index, value); onChanged(); } else { diBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ public Builder setDi( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (diBuilder_ == null) { ensureDiIsMutable(); di_.set(index, builderForValue.build()); onChanged(); } else { diBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ public Builder addDi(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (diBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDiIsMutable(); di_.add(value); onChanged(); } else { diBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ public Builder addDi( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (diBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDiIsMutable(); di_.add(index, value); onChanged(); } else { diBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ public Builder addDi( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (diBuilder_ == null) { ensureDiIsMutable(); di_.add(builderForValue.build()); onChanged(); } else { diBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ public Builder addDi( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (diBuilder_ == null) { ensureDiIsMutable(); di_.add(index, builderForValue.build()); onChanged(); } else { diBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ public Builder addAllDi( java.lang.Iterable values) { if (diBuilder_ == null) { ensureDiIsMutable(); super.addAll(values, di_); onChanged(); } else { diBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ public Builder clearDi() { if (diBuilder_ == null) { di_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { diBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ public Builder removeDi(int index) { if (diBuilder_ == null) { ensureDiIsMutable(); di_.remove(index); onChanged(); } else { diBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getDiBuilder( int index) { return getDiFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDiOrBuilder( int index) { if (diBuilder_ == null) { return di_.get(index); } else { return diBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ public java.util.List getDiOrBuilderList() { if (diBuilder_ != null) { return diBuilder_.getMessageOrBuilderList(); } else { return 
java.util.Collections.unmodifiableList(di_); } } /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addDiBuilder() { return getDiFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addDiBuilder( int index) { return getDiFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.DatanodeInfoProto di = 1; */ public java.util.List getDiBuilderList() { return getDiFieldBuilder().getBuilderList(); } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> getDiFieldBuilder() { if (diBuilder_ == null) { diBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>( di_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); di_ = null; } return diBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetDatanodeReportResponseProto) } static { defaultInstance = new GetDatanodeReportResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetDatanodeReportResponseProto) } public interface GetDatanodeStorageReportRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.DatanodeReportTypeProto type = 1; /** * required .hadoop.hdfs.DatanodeReportTypeProto type = 1; */ boolean hasType(); /** * required .hadoop.hdfs.DatanodeReportTypeProto type = 1; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto getType(); } /** * Protobuf type {@code hadoop.hdfs.GetDatanodeStorageReportRequestProto} */ public static final class GetDatanodeStorageReportRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetDatanodeStorageReportRequestProtoOrBuilder { // Use GetDatanodeStorageReportRequestProto.newBuilder() to construct. 
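// Note on the required 'type' field, mirroring GetDatanodeReportRequestProto
// above (a sketch, not emitted by protoc): calling build() before setType(...)
// throws an UninitializedMessageException, because isInitialized() checks
// hasType(); buildPartial() would return the incomplete message instead:
//
//   GetDatanodeStorageReportRequestProto.Builder b =
//       GetDatanodeStorageReportRequestProto.newBuilder();
//   b.setType(DatanodeReportTypeProto.ALL);
//   GetDatanodeStorageReportRequestProto req = b.build();  // succeeds once type is set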
private GetDatanodeStorageReportRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetDatanodeStorageReportRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetDatanodeStorageReportRequestProto defaultInstance; public static GetDatanodeStorageReportRequestProto getDefaultInstance() { return defaultInstance; } public GetDatanodeStorageReportRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetDatanodeStorageReportRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto value = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { bitField0_ |= 0x00000001; type_ = value; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeStorageReportRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeStorageReportRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<GetDatanodeStorageReportRequestProto> PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<GetDatanodeStorageReportRequestProto>() { public GetDatanodeStorageReportRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetDatanodeStorageReportRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.DatanodeReportTypeProto type = 1; public static final int TYPE_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto type_; /** * required .hadoop.hdfs.DatanodeReportTypeProto type = 1; */ public boolean hasType() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.DatanodeReportTypeProto type = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto getType() { return type_; } private void initFields() { type_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto.ALL; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasType()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeEnum(1, type_.getNumber()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeEnumSize(1, type_.getNumber()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto) obj; boolean result = true; result = result && (hasType() == other.hasType()); if (hasType()) { result = result && (getType() == other.getType()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasType()) { hash = (37 * hash) + TYPE_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getType()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws 
io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type 
{@code hadoop.hdfs.GetDatanodeStorageReportRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeStorageReportRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeStorageReportRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); type_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto.ALL; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeStorageReportRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.type_ = type_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto.getDefaultInstance()) return this; if (other.hasType()) { setType(other.getType()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasType()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.DatanodeReportTypeProto type = 1; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto type_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto.ALL; /** * required .hadoop.hdfs.DatanodeReportTypeProto type = 1; */ public boolean hasType() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.DatanodeReportTypeProto type = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto getType() { return type_; } /** * required .hadoop.hdfs.DatanodeReportTypeProto type = 1; */ public Builder setType(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; type_ = value; onChanged(); return this; } /** * required .hadoop.hdfs.DatanodeReportTypeProto type = 1; */ public Builder clearType() { bitField0_ = (bitField0_ & ~0x00000001); type_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto.ALL; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetDatanodeStorageReportRequestProto) } static { defaultInstance = new GetDatanodeStorageReportRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetDatanodeStorageReportRequestProto) } public interface DatanodeStorageReportProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.DatanodeInfoProto datanodeInfo = 1; /** * required .hadoop.hdfs.DatanodeInfoProto datanodeInfo = 1; */ boolean hasDatanodeInfo(); /** * required .hadoop.hdfs.DatanodeInfoProto datanodeInfo = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodeInfo(); /** * required .hadoop.hdfs.DatanodeInfoProto datanodeInfo = 1; */ 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodeInfoOrBuilder(); // repeated .hadoop.hdfs.StorageReportProto storageReports = 2; /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ java.util.List getStorageReportsList(); /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto getStorageReports(int index); /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ int getStorageReportsCount(); /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ java.util.List getStorageReportsOrBuilderList(); /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder getStorageReportsOrBuilder( int index); } /** * Protobuf type {@code hadoop.hdfs.DatanodeStorageReportProto} */ public static final class DatanodeStorageReportProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements DatanodeStorageReportProtoOrBuilder { // Use DatanodeStorageReportProto.newBuilder() to construct. private DatanodeStorageReportProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DatanodeStorageReportProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DatanodeStorageReportProto defaultInstance; public static DatanodeStorageReportProto getDefaultInstance() { return defaultInstance; } public DatanodeStorageReportProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DatanodeStorageReportProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = datanodeInfo_.toBuilder(); } datanodeInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(datanodeInfo_); datanodeInfo_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 18: { if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { storageReports_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000002; } storageReports_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.PARSER, extensionRegistry)); break; } } } } catch 
(io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { storageReports_ = java.util.Collections.unmodifiableList(storageReports_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DatanodeStorageReportProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DatanodeStorageReportProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public DatanodeStorageReportProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new DatanodeStorageReportProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.DatanodeInfoProto datanodeInfo = 1; public static final int DATANODEINFO_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto datanodeInfo_; /** * required .hadoop.hdfs.DatanodeInfoProto datanodeInfo = 1; */ public boolean hasDatanodeInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.DatanodeInfoProto datanodeInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodeInfo() { return datanodeInfo_; } /** * required .hadoop.hdfs.DatanodeInfoProto datanodeInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodeInfoOrBuilder() { return datanodeInfo_; } // repeated .hadoop.hdfs.StorageReportProto storageReports = 2; public static final int STORAGEREPORTS_FIELD_NUMBER = 2; private java.util.List storageReports_; /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ public java.util.List getStorageReportsList() { return storageReports_; } /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ public java.util.List getStorageReportsOrBuilderList() { return storageReports_; } /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ public int getStorageReportsCount() { return storageReports_.size(); } /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto getStorageReports(int index) { return storageReports_.get(index); } /** * repeated 
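The stream constructor above dispatches on raw tag values rather than field numbers: a protobuf tag packs the field number and wire type as (fieldNumber << 3) | wireType, and wire type 2 marks a length-delimited value such as an embedded message. A two-line check of the cases handled there:

public final class TagValues {
  public static void main(String[] args) {
    // (fieldNumber << 3) | wireType, with wire type 2 = length-delimited
    System.out.println((1 << 3) | 2); // 10 -> case 10: datanodeInfo (field 1)
    System.out.println((2 << 3) | 2); // 18 -> case 18: storageReports (field 2)
  }
}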
.hadoop.hdfs.StorageReportProto storageReports = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder getStorageReportsOrBuilder( int index) { return storageReports_.get(index); } private void initFields() { datanodeInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance(); storageReports_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasDatanodeInfo()) { memoizedIsInitialized = 0; return false; } if (!getDatanodeInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getStorageReportsCount(); i++) { if (!getStorageReports(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, datanodeInfo_); } for (int i = 0; i < storageReports_.size(); i++) { output.writeMessage(2, storageReports_.get(i)); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, datanodeInfo_); } for (int i = 0; i < storageReports_.size(); i++) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(2, storageReports_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto) obj; boolean result = true; result = result && (hasDatanodeInfo() == other.hasDatanodeInfo()); if (hasDatanodeInfo()) { result = result && getDatanodeInfo() .equals(other.getDatanodeInfo()); } result = result && getStorageReportsList() .equals(other.getStorageReportsList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasDatanodeInfo()) { hash = (37 * hash) + DATANODEINFO_FIELD_NUMBER; hash = (53 * hash) + getDatanodeInfo().hashCode(); } if (getStorageReportsCount() > 0) { hash = (37 * hash) + STORAGEREPORTS_FIELD_NUMBER; hash = (53 * hash) + getStorageReportsList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto 
parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code 
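All of the parseFrom overloads above funnel into the static PARSER; the delimited variants differ only in expecting a varint length prefix, which lets several messages share one stream. A round-trip sketch, reusing the request message from earlier because its single required field keeps the example self-contained (the request class's own writeDelimitedTo/parseDelimitedFrom are generated from the same template earlier in this file):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto;

public final class DelimitedRoundTrip {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    GetDatanodeStorageReportRequestProto.newBuilder()
        .setType(DatanodeReportTypeProto.ALL)
        .build()
        .writeDelimitedTo(out); // varint length prefix, then the message bytes
    GetDatanodeStorageReportRequestProto parsed =
        GetDatanodeStorageReportRequestProto.parseDelimitedFrom(
            new ByteArrayInputStream(out.toByteArray()));
    System.out.println(parsed.getType()); // ALL
  }
}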
hadoop.hdfs.DatanodeStorageReportProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DatanodeStorageReportProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DatanodeStorageReportProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getDatanodeInfoFieldBuilder(); getStorageReportsFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (datanodeInfoBuilder_ == null) { datanodeInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance(); } else { datanodeInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); if (storageReportsBuilder_ == null) { storageReports_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000002); } else { storageReportsBuilder_.clear(); } return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DatanodeStorageReportProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (datanodeInfoBuilder_ == null) { result.datanodeInfo_ = 
datanodeInfo_; } else { result.datanodeInfo_ = datanodeInfoBuilder_.build(); } if (storageReportsBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002)) { storageReports_ = java.util.Collections.unmodifiableList(storageReports_); bitField0_ = (bitField0_ & ~0x00000002); } result.storageReports_ = storageReports_; } else { result.storageReports_ = storageReportsBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto.getDefaultInstance()) return this; if (other.hasDatanodeInfo()) { mergeDatanodeInfo(other.getDatanodeInfo()); } if (storageReportsBuilder_ == null) { if (!other.storageReports_.isEmpty()) { if (storageReports_.isEmpty()) { storageReports_ = other.storageReports_; bitField0_ = (bitField0_ & ~0x00000002); } else { ensureStorageReportsIsMutable(); storageReports_.addAll(other.storageReports_); } onChanged(); } } else { if (!other.storageReports_.isEmpty()) { if (storageReportsBuilder_.isEmpty()) { storageReportsBuilder_.dispose(); storageReportsBuilder_ = null; storageReports_ = other.storageReports_; bitField0_ = (bitField0_ & ~0x00000002); storageReportsBuilder_ = io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
getStorageReportsFieldBuilder() : null; } else { storageReportsBuilder_.addAllMessages(other.storageReports_); } } } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasDatanodeInfo()) { return false; } if (!getDatanodeInfo().isInitialized()) { return false; } for (int i = 0; i < getStorageReportsCount(); i++) { if (!getStorageReports(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.DatanodeInfoProto datanodeInfo = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto datanodeInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> datanodeInfoBuilder_; /** * required .hadoop.hdfs.DatanodeInfoProto datanodeInfo = 1; */ public boolean hasDatanodeInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.DatanodeInfoProto datanodeInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodeInfo() { if (datanodeInfoBuilder_ == null) { return datanodeInfo_; } else { return datanodeInfoBuilder_.getMessage(); } } /** * required .hadoop.hdfs.DatanodeInfoProto datanodeInfo = 1; */ public Builder setDatanodeInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (datanodeInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } datanodeInfo_ = value; onChanged(); } else { datanodeInfoBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.DatanodeInfoProto datanodeInfo = 1; */ public Builder setDatanodeInfo( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { if (datanodeInfoBuilder_ == null) { datanodeInfo_ = builderForValue.build(); onChanged(); } else { datanodeInfoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.DatanodeInfoProto datanodeInfo = 1; */ public Builder mergeDatanodeInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { if (datanodeInfoBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && datanodeInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()) { datanodeInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder(datanodeInfo_).mergeFrom(value).buildPartial(); } else { datanodeInfo_ = value; } onChanged(); } 
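As the builder's isInitialized() above shows, a valid DatanodeStorageReportProto needs datanodeInfo set and every storageReports element initialized. buildPartial() skips that check while build() enforces it; a short sketch makes the difference concrete:

import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto;

public final class PartialBuild {
  public static void main(String[] args) {
    DatanodeStorageReportProto.Builder builder = DatanodeStorageReportProto.newBuilder();
    System.out.println(builder.isInitialized()); // false: required datanodeInfo unset
    DatanodeStorageReportProto partial = builder.buildPartial(); // no validation performed
    System.out.println(partial.isInitialized()); // still false
    // builder.build() here would throw UninitializedMessageException instead.
  }
}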
else { datanodeInfoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.DatanodeInfoProto datanodeInfo = 1; */ public Builder clearDatanodeInfo() { if (datanodeInfoBuilder_ == null) { datanodeInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance(); onChanged(); } else { datanodeInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.DatanodeInfoProto datanodeInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getDatanodeInfoBuilder() { bitField0_ |= 0x00000001; onChanged(); return getDatanodeInfoFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.DatanodeInfoProto datanodeInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodeInfoOrBuilder() { if (datanodeInfoBuilder_ != null) { return datanodeInfoBuilder_.getMessageOrBuilder(); } else { return datanodeInfo_; } } /** * required .hadoop.hdfs.DatanodeInfoProto datanodeInfo = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> getDatanodeInfoFieldBuilder() { if (datanodeInfoBuilder_ == null) { datanodeInfoBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>( datanodeInfo_, getParentForChildren(), isClean()); datanodeInfo_ = null; } return datanodeInfoBuilder_; } // repeated .hadoop.hdfs.StorageReportProto storageReports = 2; private java.util.List storageReports_ = java.util.Collections.emptyList(); private void ensureStorageReportsIsMutable() { if (!((bitField0_ & 0x00000002) == 0x00000002)) { storageReports_ = new java.util.ArrayList(storageReports_); bitField0_ |= 0x00000002; } } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder> storageReportsBuilder_; /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ public java.util.List getStorageReportsList() { if (storageReportsBuilder_ == null) { return java.util.Collections.unmodifiableList(storageReports_); } else { return storageReportsBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ public int getStorageReportsCount() { if (storageReportsBuilder_ == null) { return storageReports_.size(); } else { return storageReportsBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto getStorageReports(int index) { if (storageReportsBuilder_ == null) { return storageReports_.get(index); } else { return storageReportsBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ public Builder setStorageReports( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto value) { if 
(storageReportsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureStorageReportsIsMutable(); storageReports_.set(index, value); onChanged(); } else { storageReportsBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ public Builder setStorageReports( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder builderForValue) { if (storageReportsBuilder_ == null) { ensureStorageReportsIsMutable(); storageReports_.set(index, builderForValue.build()); onChanged(); } else { storageReportsBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ public Builder addStorageReports(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto value) { if (storageReportsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureStorageReportsIsMutable(); storageReports_.add(value); onChanged(); } else { storageReportsBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ public Builder addStorageReports( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto value) { if (storageReportsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureStorageReportsIsMutable(); storageReports_.add(index, value); onChanged(); } else { storageReportsBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ public Builder addStorageReports( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder builderForValue) { if (storageReportsBuilder_ == null) { ensureStorageReportsIsMutable(); storageReports_.add(builderForValue.build()); onChanged(); } else { storageReportsBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ public Builder addStorageReports( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder builderForValue) { if (storageReportsBuilder_ == null) { ensureStorageReportsIsMutable(); storageReports_.add(index, builderForValue.build()); onChanged(); } else { storageReportsBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ public Builder addAllStorageReports( java.lang.Iterable values) { if (storageReportsBuilder_ == null) { ensureStorageReportsIsMutable(); super.addAll(values, storageReports_); onChanged(); } else { storageReportsBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ public Builder clearStorageReports() { if (storageReportsBuilder_ == null) { storageReports_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000002); onChanged(); } else { storageReportsBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ public Builder removeStorageReports(int index) { if (storageReportsBuilder_ == null) { ensureStorageReportsIsMutable(); storageReports_.remove(index); onChanged(); } else { storageReportsBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder getStorageReportsBuilder( int index) { return 
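The repeated-field mutators above come in value and builder flavors; addStorageReportsBuilder(), defined just below, registers a new element immediately and hands back its nested builder, whose edits flow into the parent on build(). A sketch; the storageUuid field and its value are assumptions taken from hdfs.proto's StorageReportProto, not shown in this excerpt:

import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;

public final class RepeatedFieldSketch {
  public static void main(String[] args) {
    DatanodeStorageReportProto.Builder report = DatanodeStorageReportProto.newBuilder();
    HdfsProtos.StorageReportProto.Builder storage = report.addStorageReportsBuilder();
    storage.setStorageUuid("storage-0"); // illustrative value; field assumed from hdfs.proto
    System.out.println(report.getStorageReportsCount()); // 1: element registered on add
    report.removeStorageReports(0); // mutators can drop elements again
    System.out.println(report.getStorageReportsCount()); // 0
  }
}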
getStorageReportsFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder getStorageReportsOrBuilder( int index) { if (storageReportsBuilder_ == null) { return storageReports_.get(index); } else { return storageReportsBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ public java.util.List getStorageReportsOrBuilderList() { if (storageReportsBuilder_ != null) { return storageReportsBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(storageReports_); } } /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder addStorageReportsBuilder() { return getStorageReportsFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder addStorageReportsBuilder( int index) { return getStorageReportsFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.StorageReportProto storageReports = 2; */ public java.util.List getStorageReportsBuilderList() { return getStorageReportsFieldBuilder().getBuilderList(); } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder> getStorageReportsFieldBuilder() { if (storageReportsBuilder_ == null) { storageReportsBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder>( storageReports_, ((bitField0_ & 0x00000002) == 0x00000002), getParentForChildren(), isClean()); storageReports_ = null; } return storageReportsBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeStorageReportProto) } static { defaultInstance = new DatanodeStorageReportProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeStorageReportProto) } public interface GetDatanodeStorageReportResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ java.util.List getDatanodeStorageReportsList(); /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto getDatanodeStorageReports(int index); /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ int getDatanodeStorageReportsCount(); /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ java.util.List getDatanodeStorageReportsOrBuilderList(); /** * repeated 
.hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProtoOrBuilder getDatanodeStorageReportsOrBuilder( int index); } /** * Protobuf type {@code hadoop.hdfs.GetDatanodeStorageReportResponseProto} */ public static final class GetDatanodeStorageReportResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetDatanodeStorageReportResponseProtoOrBuilder { // Use GetDatanodeStorageReportResponseProto.newBuilder() to construct. private GetDatanodeStorageReportResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetDatanodeStorageReportResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetDatanodeStorageReportResponseProto defaultInstance; public static GetDatanodeStorageReportResponseProto getDefaultInstance() { return defaultInstance; } public GetDatanodeStorageReportResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetDatanodeStorageReportResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { datanodeStorageReports_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } datanodeStorageReports_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto.PARSER, extensionRegistry)); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { datanodeStorageReports_ = java.util.Collections.unmodifiableList(datanodeStorageReports_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeStorageReportResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeStorageReportResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetDatanodeStorageReportResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetDatanodeStorageReportResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } // repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; public static final int DATANODESTORAGEREPORTS_FIELD_NUMBER = 1; private java.util.List datanodeStorageReports_; /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ public java.util.List getDatanodeStorageReportsList() { return datanodeStorageReports_; } /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ public java.util.List getDatanodeStorageReportsOrBuilderList() { return datanodeStorageReports_; } /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ public int getDatanodeStorageReportsCount() { return datanodeStorageReports_.size(); } /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto getDatanodeStorageReports(int index) { return datanodeStorageReports_.get(index); } /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProtoOrBuilder getDatanodeStorageReportsOrBuilder( int index) { return datanodeStorageReports_.get(index); } private void initFields() { datanodeStorageReports_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; for (int i = 0; i < getDatanodeStorageReportsCount(); i++) { if (!getDatanodeStorageReports(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); for (int i = 0; i < datanodeStorageReports_.size(); i++) { output.writeMessage(1, datanodeStorageReports_.get(i)); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; for (int i = 0; i < datanodeStorageReports_.size(); i++) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, datanodeStorageReports_.get(i)); } size += getUnknownFields().getSerializedSize(); 
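getSerializedSize() above mirrors writeTo() field for field, which is what makes the size safe to memoize. On the consuming side, a response parsed from an empty buffer is valid, since its only field is repeated, so the usual iteration pattern can be shown without fabricating datanode data:

import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto;

public final class ResponseConsumer {
  public static void main(String[] args) throws Exception {
    // An empty byte[] decodes to an empty, fully initialized response.
    GetDatanodeStorageReportResponseProto response =
        GetDatanodeStorageReportResponseProto.parseFrom(new byte[0]);
    for (DatanodeStorageReportProto node : response.getDatanodeStorageReportsList()) {
      // Each entry pairs one datanode with its per-volume storage reports.
      System.out.println(node.getDatanodeInfo() + " -> "
          + node.getStorageReportsCount() + " storage report(s)");
    }
  }
}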
memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto) obj; boolean result = true; result = result && getDatanodeStorageReportsList() .equals(other.getDatanodeStorageReportsList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (getDatanodeStorageReportsCount() > 0) { hash = (37 * hash) + DATANODESTORAGEREPORTS_FIELD_NUMBER; hash = (53 * hash) + getDatanodeStorageReportsList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return 
PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetDatanodeStorageReportResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeStorageReportResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeStorageReportResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getDatanodeStorageReportsFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (datanodeStorageReportsBuilder_ == null) { datanodeStorageReports_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else 
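toBuilder() above is shorthand for newBuilder(this), the standard way to derive a modified copy of an immutable message, and equals() compares field values rather than identity. A quick check, also exercising the memoized serialized size:

import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto;

public final class CopyOnBuild {
  public static void main(String[] args) {
    GetDatanodeStorageReportResponseProto original =
        GetDatanodeStorageReportResponseProto.getDefaultInstance();
    GetDatanodeStorageReportResponseProto copy = original.toBuilder().build();
    System.out.println(copy.equals(original));        // true: value-based equality
    System.out.println(original.getSerializedSize()); // 0; cached after the first call
  }
}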
{ datanodeStorageReportsBuilder_.clear(); } return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeStorageReportResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto(this); int from_bitField0_ = bitField0_; if (datanodeStorageReportsBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { datanodeStorageReports_ = java.util.Collections.unmodifiableList(datanodeStorageReports_); bitField0_ = (bitField0_ & ~0x00000001); } result.datanodeStorageReports_ = datanodeStorageReports_; } else { result.datanodeStorageReports_ = datanodeStorageReportsBuilder_.build(); } onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto.getDefaultInstance()) return this; if (datanodeStorageReportsBuilder_ == null) { if (!other.datanodeStorageReports_.isEmpty()) { if (datanodeStorageReports_.isEmpty()) { datanodeStorageReports_ = other.datanodeStorageReports_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureDatanodeStorageReportsIsMutable(); datanodeStorageReports_.addAll(other.datanodeStorageReports_); } onChanged(); } } else { if (!other.datanodeStorageReports_.isEmpty()) { if (datanodeStorageReportsBuilder_.isEmpty()) { datanodeStorageReportsBuilder_.dispose(); datanodeStorageReportsBuilder_ = null; datanodeStorageReports_ = other.datanodeStorageReports_; bitField0_ = (bitField0_ & ~0x00000001); datanodeStorageReportsBuilder_ = io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
getDatanodeStorageReportsFieldBuilder() : null; } else { datanodeStorageReportsBuilder_.addAllMessages(other.datanodeStorageReports_); } } } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { for (int i = 0; i < getDatanodeStorageReportsCount(); i++) { if (!getDatanodeStorageReports(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; private java.util.List datanodeStorageReports_ = java.util.Collections.emptyList(); private void ensureDatanodeStorageReportsIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { datanodeStorageReports_ = new java.util.ArrayList(datanodeStorageReports_); bitField0_ |= 0x00000001; } } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProtoOrBuilder> datanodeStorageReportsBuilder_; /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ public java.util.List getDatanodeStorageReportsList() { if (datanodeStorageReportsBuilder_ == null) { return java.util.Collections.unmodifiableList(datanodeStorageReports_); } else { return datanodeStorageReportsBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ public int getDatanodeStorageReportsCount() { if (datanodeStorageReportsBuilder_ == null) { return datanodeStorageReports_.size(); } else { return datanodeStorageReportsBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto getDatanodeStorageReports(int index) { if (datanodeStorageReportsBuilder_ == null) { return datanodeStorageReports_.get(index); } else { return datanodeStorageReportsBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ public Builder setDatanodeStorageReports( int index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto value) { if (datanodeStorageReportsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDatanodeStorageReportsIsMutable(); datanodeStorageReports_.set(index, value); onChanged(); } else { datanodeStorageReportsBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.DatanodeStorageReportProto 
datanodeStorageReports = 1; */ public Builder setDatanodeStorageReports( int index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto.Builder builderForValue) { if (datanodeStorageReportsBuilder_ == null) { ensureDatanodeStorageReportsIsMutable(); datanodeStorageReports_.set(index, builderForValue.build()); onChanged(); } else { datanodeStorageReportsBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ public Builder addDatanodeStorageReports(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto value) { if (datanodeStorageReportsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDatanodeStorageReportsIsMutable(); datanodeStorageReports_.add(value); onChanged(); } else { datanodeStorageReportsBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ public Builder addDatanodeStorageReports( int index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto value) { if (datanodeStorageReportsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureDatanodeStorageReportsIsMutable(); datanodeStorageReports_.add(index, value); onChanged(); } else { datanodeStorageReportsBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ public Builder addDatanodeStorageReports( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto.Builder builderForValue) { if (datanodeStorageReportsBuilder_ == null) { ensureDatanodeStorageReportsIsMutable(); datanodeStorageReports_.add(builderForValue.build()); onChanged(); } else { datanodeStorageReportsBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ public Builder addDatanodeStorageReports( int index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto.Builder builderForValue) { if (datanodeStorageReportsBuilder_ == null) { ensureDatanodeStorageReportsIsMutable(); datanodeStorageReports_.add(index, builderForValue.build()); onChanged(); } else { datanodeStorageReportsBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ public Builder addAllDatanodeStorageReports( java.lang.Iterable values) { if (datanodeStorageReportsBuilder_ == null) { ensureDatanodeStorageReportsIsMutable(); super.addAll(values, datanodeStorageReports_); onChanged(); } else { datanodeStorageReportsBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ public Builder clearDatanodeStorageReports() { if (datanodeStorageReportsBuilder_ == null) { datanodeStorageReports_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { datanodeStorageReportsBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ public Builder removeDatanodeStorageReports(int index) { if (datanodeStorageReportsBuilder_ == null) { ensureDatanodeStorageReportsIsMutable(); datanodeStorageReports_.remove(index); onChanged(); } else { 
datanodeStorageReportsBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto.Builder getDatanodeStorageReportsBuilder( int index) { return getDatanodeStorageReportsFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProtoOrBuilder getDatanodeStorageReportsOrBuilder( int index) { if (datanodeStorageReportsBuilder_ == null) { return datanodeStorageReports_.get(index); } else { return datanodeStorageReportsBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ public java.util.List getDatanodeStorageReportsOrBuilderList() { if (datanodeStorageReportsBuilder_ != null) { return datanodeStorageReportsBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(datanodeStorageReports_); } } /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto.Builder addDatanodeStorageReportsBuilder() { return getDatanodeStorageReportsFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto.Builder addDatanodeStorageReportsBuilder( int index) { return getDatanodeStorageReportsFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.DatanodeStorageReportProto datanodeStorageReports = 1; */ public java.util.List getDatanodeStorageReportsBuilderList() { return getDatanodeStorageReportsFieldBuilder().getBuilderList(); } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProtoOrBuilder> getDatanodeStorageReportsFieldBuilder() { if (datanodeStorageReportsBuilder_ == null) { datanodeStorageReportsBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProtoOrBuilder>( datanodeStorageReports_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); datanodeStorageReports_ = null; } return datanodeStorageReportsBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetDatanodeStorageReportResponseProto) } static { defaultInstance = new GetDatanodeStorageReportResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetDatanodeStorageReportResponseProto) } public 
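The request and response types tie together through the getDatanodeStorageReport RPC on the ClientNamenodeProtocol service generated later in this file. The blocking-stub type and the null RpcController below are assumptions based on that service definition and on Hadoop's ProtobufRpcEngine convention; obtaining a stub is deployment-specific and out of scope here.

import io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.*;

public final class StorageReportCall {
  // Assumed stub type from the service block later in this file.
  static GetDatanodeStorageReportResponseProto fetchLive(
      ClientNamenodeProtocol.BlockingInterface namenode) throws ServiceException {
    GetDatanodeStorageReportRequestProto request =
        GetDatanodeStorageReportRequestProto.newBuilder()
            .setType(DatanodeReportTypeProto.LIVE) // LIVE restricts the report to live datanodes
            .build();
    return namenode.getDatanodeStorageReport(null, request); // null controller: assumed convention
  }
}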
interface GetPreferredBlockSizeRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string filename = 1; /** * required string filename = 1; */ boolean hasFilename(); /** * required string filename = 1; */ java.lang.String getFilename(); /** * required string filename = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getFilenameBytes(); } /** * Protobuf type {@code hadoop.hdfs.GetPreferredBlockSizeRequestProto} */ public static final class GetPreferredBlockSizeRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetPreferredBlockSizeRequestProtoOrBuilder { // Use GetPreferredBlockSizeRequestProto.newBuilder() to construct. private GetPreferredBlockSizeRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetPreferredBlockSizeRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetPreferredBlockSizeRequestProto defaultInstance; public static GetPreferredBlockSizeRequestProto getDefaultInstance() { return defaultInstance; } public GetPreferredBlockSizeRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetPreferredBlockSizeRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; filename_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetPreferredBlockSizeRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetPreferredBlockSizeRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto.class, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetPreferredBlockSizeRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetPreferredBlockSizeRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string filename = 1; public static final int FILENAME_FIELD_NUMBER = 1; private java.lang.Object filename_; /** * required string filename = 1; */ public boolean hasFilename() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string filename = 1; */ public java.lang.String getFilename() { java.lang.Object ref = filename_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { filename_ = s; } return s; } } /** * required string filename = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getFilenameBytes() { java.lang.Object ref = filename_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); filename_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { filename_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasFilename()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getFilenameBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getFilenameBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto other = 
(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto) obj; boolean result = true; result = result && (hasFilename() == other.hasFilename()); if (hasFilename()) { result = result && getFilename() .equals(other.getFilename()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasFilename()) { hash = (37 * hash) + FILENAME_FIELD_NUMBER; hash = (53 * hash) + getFilename().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static 
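/*
 * Illustrative sketch (not part of the generated source): the parseFrom
 * overloads here all delegate to PARSER. The parseDelimitedFrom variants pair
 * with writeDelimitedTo(OutputStream) inherited from AbstractMessageLite,
 * which prefixes each message with its varint-encoded length so several
 * messages can share one stream. 'out' and 'in' below are assumed streams:
 *
 *   req.writeDelimitedTo(out);
 *   GetPreferredBlockSizeRequestProto next =
 *       GetPreferredBlockSizeRequestProto.parseDelimitedFrom(in);  // null at clean EOF in protobuf 2.x
 */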
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetPreferredBlockSizeRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetPreferredBlockSizeRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetPreferredBlockSizeRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); filename_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetPreferredBlockSizeRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto result = 
buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.filename_ = filename_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto.getDefaultInstance()) return this; if (other.hasFilename()) { bitField0_ |= 0x00000001; filename_ = other.filename_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasFilename()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string filename = 1; private java.lang.Object filename_ = ""; /** * required string filename = 1; */ public boolean hasFilename() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string filename = 1; */ public java.lang.String getFilename() { java.lang.Object ref = filename_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); filename_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string filename = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getFilenameBytes() { java.lang.Object ref = filename_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); filename_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string filename = 1; */ public Builder setFilename( java.lang.String value) { if (value == null) { throw new NullPointerException(); } 
bitField0_ |= 0x00000001; filename_ = value; onChanged(); return this; } /** * required string filename = 1; */ public Builder clearFilename() { bitField0_ = (bitField0_ & ~0x00000001); filename_ = getDefaultInstance().getFilename(); onChanged(); return this; } /** * required string filename = 1; */ public Builder setFilenameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; filename_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetPreferredBlockSizeRequestProto) } static { defaultInstance = new GetPreferredBlockSizeRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetPreferredBlockSizeRequestProto) } public interface GetPreferredBlockSizeResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required uint64 bsize = 1; /** * required uint64 bsize = 1; */ boolean hasBsize(); /** * required uint64 bsize = 1; */ long getBsize(); } /** * Protobuf type {@code hadoop.hdfs.GetPreferredBlockSizeResponseProto} */ public static final class GetPreferredBlockSizeResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetPreferredBlockSizeResponseProtoOrBuilder { // Use GetPreferredBlockSizeResponseProto.newBuilder() to construct. private GetPreferredBlockSizeResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetPreferredBlockSizeResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetPreferredBlockSizeResponseProto defaultInstance; public static GetPreferredBlockSizeResponseProto getDefaultInstance() { return defaultInstance; } public GetPreferredBlockSizeResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetPreferredBlockSizeResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; bsize_ = input.readUInt64(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final 
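/*
 * Illustrative sketch (not part of the generated source): building and
 * round-tripping GetPreferredBlockSizeRequestProto. 'filename' is the only
 * field and is required, so build() throws UninitializedMessageException when
 * it is unset; the path below is a made-up example value.
 *
 *   GetPreferredBlockSizeRequestProto req =
 *       GetPreferredBlockSizeRequestProto.newBuilder()
 *           .setFilename("/user/alice/data.txt")
 *           .build();
 *   byte[] wire = req.toByteArray();
 *   GetPreferredBlockSizeRequestProto parsed =
 *       GetPreferredBlockSizeRequestProto.parseFrom(wire);  // throws InvalidProtocolBufferException on bad input
 *   assert parsed.hasFilename() && parsed.getFilename().equals(req.getFilename());
 *
 * Note the lazy UTF-8 handling in getFilename()/getFilenameBytes() above: the
 * field is held as a ByteString after parsing and converted to a String (and
 * cached back into filename_) on first access.
 */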
io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetPreferredBlockSizeResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetPreferredBlockSizeResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetPreferredBlockSizeResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetPreferredBlockSizeResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required uint64 bsize = 1; public static final int BSIZE_FIELD_NUMBER = 1; private long bsize_; /** * required uint64 bsize = 1; */ public boolean hasBsize() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 bsize = 1; */ public long getBsize() { return bsize_; } private void initFields() { bsize_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasBsize()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(1, bsize_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(1, bsize_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto) obj; boolean result = true; result = result && (hasBsize() == other.hasBsize()); if (hasBsize()) { result = result && (getBsize() == 
other.getBsize()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasBsize()) { hash = (37 * hash) + BSIZE_FIELD_NUMBER; hash = (53 * hash) + hashLong(getBsize()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetPreferredBlockSizeResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetPreferredBlockSizeResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetPreferredBlockSizeResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); bsize_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetPreferredBlockSizeResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto buildPartial() { 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.bsize_ = bsize_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.getDefaultInstance()) return this; if (other.hasBsize()) { setBsize(other.getBsize()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasBsize()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required uint64 bsize = 1; private long bsize_ ; /** * required uint64 bsize = 1; */ public boolean hasBsize() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 bsize = 1; */ public long getBsize() { return bsize_; } /** * required uint64 bsize = 1; */ public Builder setBsize(long value) { bitField0_ |= 0x00000001; bsize_ = value; onChanged(); return this; } /** * required uint64 bsize = 1; */ public Builder clearBsize() { bitField0_ = (bitField0_ & ~0x00000001); bsize_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetPreferredBlockSizeResponseProto) } static { defaultInstance = new GetPreferredBlockSizeResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetPreferredBlockSizeResponseProto) } public interface SetSafeModeRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.SafeModeActionProto action = 1; /** * required .hadoop.hdfs.SafeModeActionProto action = 1; */ boolean hasAction(); /** * required .hadoop.hdfs.SafeModeActionProto action = 1; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto getAction(); // optional bool checked = 2 [default = false]; /** * optional bool checked = 2 [default = false]; */ boolean hasChecked(); /** * optional bool checked = 2 [default = false]; */ 
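/*
 * Illustrative sketch (not part of the generated source): the matching
 * response message carries a single required uint64 'bsize'. hasBsize()
 * distinguishes "explicitly set to 0" from "never set", which getBsize()
 * alone cannot. The 128 MB value is just an example:
 *
 *   GetPreferredBlockSizeResponseProto resp =
 *       GetPreferredBlockSizeResponseProto.newBuilder()
 *           .setBsize(128L * 1024 * 1024)
 *           .build();
 *   long bsize = resp.getBsize();
 */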
boolean getChecked(); } /** * Protobuf type {@code hadoop.hdfs.SetSafeModeRequestProto} */ public static final class SetSafeModeRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements SetSafeModeRequestProtoOrBuilder { // Use SetSafeModeRequestProto.newBuilder() to construct. private SetSafeModeRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SetSafeModeRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SetSafeModeRequestProto defaultInstance; public static SetSafeModeRequestProto getDefaultInstance() { return defaultInstance; } public SetSafeModeRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SetSafeModeRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto value = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { bitField0_ |= 0x00000001; action_ = value; } break; } case 16: { bitField0_ |= 0x00000002; checked_ = input.readBool(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetSafeModeRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetSafeModeRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = 
new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public SetSafeModeRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new SetSafeModeRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.SafeModeActionProto action = 1; public static final int ACTION_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto action_; /** * required .hadoop.hdfs.SafeModeActionProto action = 1; */ public boolean hasAction() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.SafeModeActionProto action = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto getAction() { return action_; } // optional bool checked = 2 [default = false]; public static final int CHECKED_FIELD_NUMBER = 2; private boolean checked_; /** * optional bool checked = 2 [default = false]; */ public boolean hasChecked() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional bool checked = 2 [default = false]; */ public boolean getChecked() { return checked_; } private void initFields() { action_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto.SAFEMODE_LEAVE; checked_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasAction()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeEnum(1, action_.getNumber()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBool(2, checked_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeEnumSize(1, action_.getNumber()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(2, checked_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto) obj; boolean result = true; result = result && (hasAction() == 
other.hasAction()); if (hasAction()) { result = result && (getAction() == other.getAction()); } result = result && (hasChecked() == other.hasChecked()); if (hasChecked()) { result = result && (getChecked() == other.getChecked()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasAction()) { hash = (37 * hash) + ACTION_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getAction()); } if (hasChecked()) { hash = (37 * hash) + CHECKED_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getChecked()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SetSafeModeRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetSafeModeRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetSafeModeRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); action_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto.SAFEMODE_LEAVE; bitField0_ = (bitField0_ & ~0x00000001); checked_ = false; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetSafeModeRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto build() { 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.action_ = action_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.checked_ = checked_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto.getDefaultInstance()) return this; if (other.hasAction()) { setAction(other.getAction()); } if (other.hasChecked()) { setChecked(other.getChecked()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasAction()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.SafeModeActionProto action = 1; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto action_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto.SAFEMODE_LEAVE; /** * required .hadoop.hdfs.SafeModeActionProto action = 1; */ public boolean hasAction() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.SafeModeActionProto action = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto getAction() { return action_; } /** * required .hadoop.hdfs.SafeModeActionProto action = 1; */ public Builder setAction(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; action_ = value; onChanged(); return this; } /** * required .hadoop.hdfs.SafeModeActionProto action = 1; */ public Builder clearAction() { 
bitField0_ = (bitField0_ & ~0x00000001); action_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto.SAFEMODE_LEAVE; onChanged(); return this; } // optional bool checked = 2 [default = false]; private boolean checked_ ; /** * optional bool checked = 2 [default = false]; */ public boolean hasChecked() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional bool checked = 2 [default = false]; */ public boolean getChecked() { return checked_; } /** * optional bool checked = 2 [default = false]; */ public Builder setChecked(boolean value) { bitField0_ |= 0x00000002; checked_ = value; onChanged(); return this; } /** * optional bool checked = 2 [default = false]; */ public Builder clearChecked() { bitField0_ = (bitField0_ & ~0x00000002); checked_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SetSafeModeRequestProto) } static { defaultInstance = new SetSafeModeRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SetSafeModeRequestProto) } public interface SetSafeModeResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required bool result = 1; /** * required bool result = 1; */ boolean hasResult(); /** * required bool result = 1; */ boolean getResult(); } /** * Protobuf type {@code hadoop.hdfs.SetSafeModeResponseProto} */ public static final class SetSafeModeResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements SetSafeModeResponseProtoOrBuilder { // Use SetSafeModeResponseProto.newBuilder() to construct. private SetSafeModeResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SetSafeModeResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SetSafeModeResponseProto defaultInstance; public static SetSafeModeResponseProto getDefaultInstance() { return defaultInstance; } public SetSafeModeResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SetSafeModeResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; result_ = input.readBool(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( 
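/*
 * Illustrative sketch (not part of the generated source):
 * SetSafeModeRequestProto combines a required enum with an optional bool.
 * SAFEMODE_LEAVE is the enum's declared default (see initFields() and
 * clearAction() above); other SafeModeActionProto constants exist in
 * hdfs.proto but are not shown in this excerpt.
 *
 *   SetSafeModeRequestProto req =
 *       SetSafeModeRequestProto.newBuilder()
 *           .setAction(SafeModeActionProto.SAFEMODE_LEAVE)
 *           .setChecked(true)   // optional, defaults to false
 *           .build();
 *
 * When the parse constructor meets an enum number it does not recognize,
 * SafeModeActionProto.valueOf(rawValue) returns null and the raw varint is
 * preserved via unknownFields.mergeVarintField(1, rawValue) rather than
 * failing the parse.
 */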
e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetSafeModeResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetSafeModeResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public SetSafeModeResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new SetSafeModeResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required bool result = 1; public static final int RESULT_FIELD_NUMBER = 1; private boolean result_; /** * required bool result = 1; */ public boolean hasResult() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool result = 1; */ public boolean getResult() { return result_; } private void initFields() { result_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasResult()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBool(1, result_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(1, result_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto) obj; boolean result = true; result = result && (hasResult() 
== other.hasResult()); if (hasResult()) { result = result && (getResult() == other.getResult()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasResult()) { hash = (37 * hash) + RESULT_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getResult()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) 
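  /**
   * Illustrative sketch (editor's addition, not generated code): round-tripping
   * a message through the generated parse entry points. {@code toByteArray()}
   * is inherited from the shaded protobuf runtime base class, not defined in
   * this file (assumption).
   *
   * byte[] wire = response.toByteArray();
   * SetSafeModeResponseProto copy = SetSafeModeResponseProto.parseFrom(wire);
   * // For length-prefixed streams of messages, use the delimited variant:
   * SetSafeModeResponseProto next =
   *     SetSafeModeResponseProto.parseDelimitedFrom(in);
   */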
throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SetSafeModeResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetSafeModeResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetSafeModeResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); result_ = false; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetSafeModeResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto result = new 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.result_ = result_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.getDefaultInstance()) return this; if (other.hasResult()) { setResult(other.getResult()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasResult()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required bool result = 1; private boolean result_ ; /** * required bool result = 1; */ public boolean hasResult() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool result = 1; */ public boolean getResult() { return result_; } /** * required bool result = 1; */ public Builder setResult(boolean value) { bitField0_ |= 0x00000001; result_ = value; onChanged(); return this; } /** * required bool result = 1; */ public Builder clearResult() { bitField0_ = (bitField0_ & ~0x00000001); result_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SetSafeModeResponseProto) } static { defaultInstance = new SetSafeModeResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SetSafeModeResponseProto) } public interface SaveNamespaceRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // optional uint64 timeWindow = 1 [default = 0]; /** * optional uint64 timeWindow = 1 [default = 0]; */ boolean hasTimeWindow(); /** * optional uint64 timeWindow = 1 [default = 0]; */ long getTimeWindow(); // optional uint64 txGap = 2 [default = 0]; /** * optional uint64 txGap = 2 [default = 0]; */ boolean hasTxGap(); /** * optional uint64 txGap = 2 [default = 0]; */ long getTxGap(); } /** * Protobuf type {@code hadoop.hdfs.SaveNamespaceRequestProto} */ public static final class SaveNamespaceRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements SaveNamespaceRequestProtoOrBuilder { // Use 
SaveNamespaceRequestProto.newBuilder() to construct. private SaveNamespaceRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SaveNamespaceRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SaveNamespaceRequestProto defaultInstance; public static SaveNamespaceRequestProto getDefaultInstance() { return defaultInstance; } public SaveNamespaceRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SaveNamespaceRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; timeWindow_ = input.readUInt64(); break; } case 16: { bitField0_ |= 0x00000002; txGap_ = input.readUInt64(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SaveNamespaceRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SaveNamespaceRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public SaveNamespaceRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new SaveNamespaceRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser 
getParserForType() { return PARSER; } private int bitField0_; // optional uint64 timeWindow = 1 [default = 0]; public static final int TIMEWINDOW_FIELD_NUMBER = 1; private long timeWindow_; /** * optional uint64 timeWindow = 1 [default = 0]; */ public boolean hasTimeWindow() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional uint64 timeWindow = 1 [default = 0]; */ public long getTimeWindow() { return timeWindow_; } // optional uint64 txGap = 2 [default = 0]; public static final int TXGAP_FIELD_NUMBER = 2; private long txGap_; /** * optional uint64 txGap = 2 [default = 0]; */ public boolean hasTxGap() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional uint64 txGap = 2 [default = 0]; */ public long getTxGap() { return txGap_; } private void initFields() { timeWindow_ = 0L; txGap_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(1, timeWindow_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, txGap_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(1, timeWindow_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(2, txGap_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto) obj; boolean result = true; result = result && (hasTimeWindow() == other.hasTimeWindow()); if (hasTimeWindow()) { result = result && (getTimeWindow() == other.getTimeWindow()); } result = result && (hasTxGap() == other.hasTxGap()); if (hasTxGap()) { result = result && (getTxGap() == other.getTxGap()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasTimeWindow()) { hash = (37 * hash) + TIMEWINDOW_FIELD_NUMBER; hash = (53 * hash) + hashLong(getTimeWindow()); } if (hasTxGap()) { hash = (37 * hash) + TXGAP_FIELD_NUMBER; hash = (53 * hash) + hashLong(getTxGap()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static 
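  /**
   * Illustrative sketch (editor's addition, not generated code): both fields
   * are optional with a default of 0, so an unset field still reads as 0 while
   * the has*() accessor reports whether it was explicitly set.
   *
   * SaveNamespaceRequestProto req = SaveNamespaceRequestProto.newBuilder()
   *     .setTimeWindow(3600L)      // units defined by the HDFS saveNamespace API
   *     .build();
   * req.hasTimeWindow();           // true
   * req.hasTxGap();                // false
   * req.getTxGap();                // 0 (the declared default)
   */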
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder 
builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SaveNamespaceRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SaveNamespaceRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SaveNamespaceRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); timeWindow_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); txGap_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SaveNamespaceRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.timeWindow_ = timeWindow_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.txGap_ = txGap_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof 
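  /**
   * Illustrative sketch (editor's addition, not generated code): mergeFrom
   * copies only the fields the other message actually has set, which is why
   * has*() matters even for fields with defaults.
   *
   * SaveNamespaceRequestProto a = SaveNamespaceRequestProto.newBuilder()
   *     .setTimeWindow(10L).build();
   * SaveNamespaceRequestProto b = SaveNamespaceRequestProto.newBuilder()
   *     .setTxGap(5L).mergeFrom(a).build();
   * // b now carries both txGap = 5 and timeWindow = 10.
   */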
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto.getDefaultInstance()) return this; if (other.hasTimeWindow()) { setTimeWindow(other.getTimeWindow()); } if (other.hasTxGap()) { setTxGap(other.getTxGap()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // optional uint64 timeWindow = 1 [default = 0]; private long timeWindow_ ; /** * optional uint64 timeWindow = 1 [default = 0]; */ public boolean hasTimeWindow() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional uint64 timeWindow = 1 [default = 0]; */ public long getTimeWindow() { return timeWindow_; } /** * optional uint64 timeWindow = 1 [default = 0]; */ public Builder setTimeWindow(long value) { bitField0_ |= 0x00000001; timeWindow_ = value; onChanged(); return this; } /** * optional uint64 timeWindow = 1 [default = 0]; */ public Builder clearTimeWindow() { bitField0_ = (bitField0_ & ~0x00000001); timeWindow_ = 0L; onChanged(); return this; } // optional uint64 txGap = 2 [default = 0]; private long txGap_ ; /** * optional uint64 txGap = 2 [default = 0]; */ public boolean hasTxGap() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional uint64 txGap = 2 [default = 0]; */ public long getTxGap() { return txGap_; } /** * optional uint64 txGap = 2 [default = 0]; */ public Builder setTxGap(long value) { bitField0_ |= 0x00000002; txGap_ = value; onChanged(); return this; } /** * optional uint64 txGap = 2 [default = 0]; */ public Builder clearTxGap() { bitField0_ = (bitField0_ & ~0x00000002); txGap_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SaveNamespaceRequestProto) } static { defaultInstance = new SaveNamespaceRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SaveNamespaceRequestProto) } public interface SaveNamespaceResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // optional bool saved = 1 [default = true]; /** * optional bool saved = 1 [default = true]; */ boolean hasSaved(); /** * optional bool saved = 1 [default = true]; */ boolean getSaved(); } /** * Protobuf type {@code hadoop.hdfs.SaveNamespaceResponseProto} * *
   * <pre>
   * void response
   * </pre>
*/ public static final class SaveNamespaceResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements SaveNamespaceResponseProtoOrBuilder { // Use SaveNamespaceResponseProto.newBuilder() to construct. private SaveNamespaceResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SaveNamespaceResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SaveNamespaceResponseProto defaultInstance; public static SaveNamespaceResponseProto getDefaultInstance() { return defaultInstance; } public SaveNamespaceResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SaveNamespaceResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; saved_ = input.readBool(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SaveNamespaceResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SaveNamespaceResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public SaveNamespaceResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new SaveNamespaceResponseProto(input, 
extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // optional bool saved = 1 [default = true]; public static final int SAVED_FIELD_NUMBER = 1; private boolean saved_; /** * optional bool saved = 1 [default = true]; */ public boolean hasSaved() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional bool saved = 1 [default = true]; */ public boolean getSaved() { return saved_; } private void initFields() { saved_ = true; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBool(1, saved_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(1, saved_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto) obj; boolean result = true; result = result && (hasSaved() == other.hasSaved()); if (hasSaved()) { result = result && (getSaved() == other.getSaved()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSaved()) { hash = (37 * hash) + SAVED_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getSaved()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto 
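  /**
   * Illustrative sketch (editor's addition, not generated code): note the
   * non-standard default — an empty response still reads {@code saved == true}.
   *
   * SaveNamespaceResponseProto empty =
   *     SaveNamespaceResponseProto.getDefaultInstance();
   * empty.hasSaved();              // false: field absent on the wire
   * empty.getSaved();              // true: declared [default = true]
   */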
parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SaveNamespaceResponseProto} * *
     * <pre>
     * void response
     * </pre>
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SaveNamespaceResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SaveNamespaceResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); saved_ = true; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SaveNamespaceResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.saved_ = saved_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.getDefaultInstance()) return this; if (other.hasSaved()) { setSaved(other.getSaved()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // optional bool saved = 1 [default = true]; private boolean saved_ = true; /** * optional bool saved = 1 [default = true]; */ public boolean hasSaved() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional bool saved = 1 [default = true]; */ public boolean getSaved() { return saved_; } /** * optional bool saved = 1 [default = true]; */ public Builder setSaved(boolean value) { bitField0_ |= 0x00000001; saved_ = value; onChanged(); return this; } /** * optional bool saved = 1 [default = true]; */ public Builder clearSaved() { bitField0_ = (bitField0_ & ~0x00000001); saved_ = true; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SaveNamespaceResponseProto) } static { defaultInstance = new SaveNamespaceResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SaveNamespaceResponseProto) } public interface RollEditsRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.RollEditsRequestProto} * *
   * <pre>
   * no parameters
   * </pre>
*/ public static final class RollEditsRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements RollEditsRequestProtoOrBuilder { // Use RollEditsRequestProto.newBuilder() to construct. private RollEditsRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RollEditsRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RollEditsRequestProto defaultInstance; public static RollEditsRequestProto getDefaultInstance() { return defaultInstance; } public RollEditsRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RollEditsRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollEditsRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollEditsRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public RollEditsRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new RollEditsRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { 
} private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static 
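  /**
   * Illustrative sketch (editor's addition, not generated code): the request
   * carries no fields, so the shared default instance is all a caller needs,
   * and it serializes to zero bytes.
   *
   * RollEditsRequestProto req = RollEditsRequestProto.getDefaultInstance();
   * req.getSerializedSize();       // 0 when there are no unknown fields
   */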
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RollEditsRequestProto} * *
     * <pre>
     * no parameters
     * </pre>
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollEditsRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollEditsRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollEditsRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; 
} public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RollEditsRequestProto) } static { defaultInstance = new RollEditsRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RollEditsRequestProto) } public interface RollEditsResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required uint64 newSegmentTxId = 1; /** * required uint64 newSegmentTxId = 1; */ boolean hasNewSegmentTxId(); /** * required uint64 newSegmentTxId = 1; */ long getNewSegmentTxId(); } /** * Protobuf type {@code hadoop.hdfs.RollEditsResponseProto} * *
   * <pre>
   * response
   * </pre>
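   *
   * <p>Editor's note (not generated code): a minimal round trip, assuming the
   * shaded protobuf 2.5 runtime under io.prestosql.hadoop.$internal and the
   * {@code toByteArray()} method inherited from the message base class:
   * <pre>
   * RollEditsResponseProto resp = RollEditsResponseProto.newBuilder()
   *     .setNewSegmentTxId(42L)   // newSegmentTxId is a required field
   *     .build();                 // build() throws if the field is unset
   * RollEditsResponseProto parsed =
   *     RollEditsResponseProto.parseFrom(resp.toByteArray());
   * assert parsed.getNewSegmentTxId() == 42L;
   * </pre>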
*/ public static final class RollEditsResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements RollEditsResponseProtoOrBuilder { // Use RollEditsResponseProto.newBuilder() to construct. private RollEditsResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RollEditsResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RollEditsResponseProto defaultInstance; public static RollEditsResponseProto getDefaultInstance() { return defaultInstance; } public RollEditsResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RollEditsResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; newSegmentTxId_ = input.readUInt64(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollEditsResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollEditsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public RollEditsResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new RollEditsResponseProto(input, extensionRegistry); } }; @java.lang.Override 
public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required uint64 newSegmentTxId = 1; public static final int NEWSEGMENTTXID_FIELD_NUMBER = 1; private long newSegmentTxId_; /** * required uint64 newSegmentTxId = 1; */ public boolean hasNewSegmentTxId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 newSegmentTxId = 1; */ public long getNewSegmentTxId() { return newSegmentTxId_; } private void initFields() { newSegmentTxId_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasNewSegmentTxId()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(1, newSegmentTxId_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(1, newSegmentTxId_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto) obj; boolean result = true; result = result && (hasNewSegmentTxId() == other.hasNewSegmentTxId()); if (hasNewSegmentTxId()) { result = result && (getNewSegmentTxId() == other.getNewSegmentTxId()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasNewSegmentTxId()) { hash = (37 * hash) + NEWSEGMENTTXID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getNewSegmentTxId()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, 
extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RollEditsResponseProto} * *
     * <pre>
     * response
     * </pre>
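     *
     * <p>Editor's note (not generated code): because {@code newSegmentTxId}
     * is declared required, {@code isInitialized()} on this builder reports
     * false, and {@code build()} throws, until {@code setNewSegmentTxId(long)}
     * has been called.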
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollEditsResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollEditsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); newSegmentTxId_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollEditsResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.newSegmentTxId_ = newSegmentTxId_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto.getDefaultInstance()) return this; if (other.hasNewSegmentTxId()) { setNewSegmentTxId(other.getNewSegmentTxId()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasNewSegmentTxId()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required uint64 newSegmentTxId = 1; private long newSegmentTxId_ ; /** * required uint64 newSegmentTxId = 1; */ public boolean hasNewSegmentTxId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required uint64 newSegmentTxId = 1; */ public long getNewSegmentTxId() { return newSegmentTxId_; } /** * required uint64 newSegmentTxId = 1; */ public Builder setNewSegmentTxId(long value) { bitField0_ |= 0x00000001; newSegmentTxId_ = value; onChanged(); return this; } /** * required uint64 newSegmentTxId = 1; */ public Builder clearNewSegmentTxId() { bitField0_ = (bitField0_ & ~0x00000001); newSegmentTxId_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RollEditsResponseProto) } static { defaultInstance = new RollEditsResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RollEditsResponseProto) } public interface RestoreFailedStorageRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string arg = 1; /** * required string arg = 1; */ boolean hasArg(); /** * required string arg = 1; */ java.lang.String getArg(); /** * required string arg = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getArgBytes(); } /** * Protobuf type {@code hadoop.hdfs.RestoreFailedStorageRequestProto} */ public static final class RestoreFailedStorageRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements RestoreFailedStorageRequestProtoOrBuilder { // Use RestoreFailedStorageRequestProto.newBuilder() to construct. 
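    // Editor's note (not generated code): a hedged usage sketch for this
    // request/response pair, using only methods defined in these classes.
    // The value "check" and the variable replyBytes are illustrative
    // placeholders; the RPC stub that transports the messages lives outside
    // this file:
    //
    //   RestoreFailedStorageRequestProto req =
    //       RestoreFailedStorageRequestProto.newBuilder()
    //           .setArg("check")   // required string; build() fails if unset
    //           .build();
    //   byte[] replyBytes = ...;  // obtained from the NameNode RPC layer
    //   RestoreFailedStorageResponseProto resp =
    //       RestoreFailedStorageResponseProto.parseFrom(replyBytes);
    //   boolean restored = resp.getResult();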
private RestoreFailedStorageRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RestoreFailedStorageRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RestoreFailedStorageRequestProto defaultInstance; public static RestoreFailedStorageRequestProto getDefaultInstance() { return defaultInstance; } public RestoreFailedStorageRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RestoreFailedStorageRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; arg_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RestoreFailedStorageRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RestoreFailedStorageRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public RestoreFailedStorageRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new RestoreFailedStorageRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int 
bitField0_; // required string arg = 1; public static final int ARG_FIELD_NUMBER = 1; private java.lang.Object arg_; /** * required string arg = 1; */ public boolean hasArg() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string arg = 1; */ public java.lang.String getArg() { java.lang.Object ref = arg_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { arg_ = s; } return s; } } /** * required string arg = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getArgBytes() { java.lang.Object ref = arg_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); arg_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { arg_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasArg()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getArgBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getArgBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto) obj; boolean result = true; result = result && (hasArg() == other.hasArg()); if (hasArg()) { result = result && getArg() .equals(other.getArg()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasArg()) { hash = (37 * hash) + ARG_FIELD_NUMBER; hash = (53 * hash) + getArg().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString 
data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code 
hadoop.hdfs.RestoreFailedStorageRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RestoreFailedStorageRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RestoreFailedStorageRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); arg_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RestoreFailedStorageRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.arg_ = arg_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto) { return 
mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto.getDefaultInstance()) return this; if (other.hasArg()) { bitField0_ |= 0x00000001; arg_ = other.arg_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasArg()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string arg = 1; private java.lang.Object arg_ = ""; /** * required string arg = 1; */ public boolean hasArg() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string arg = 1; */ public java.lang.String getArg() { java.lang.Object ref = arg_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); arg_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string arg = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getArgBytes() { java.lang.Object ref = arg_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); arg_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string arg = 1; */ public Builder setArg( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; arg_ = value; onChanged(); return this; } /** * required string arg = 1; */ public Builder clearArg() { bitField0_ = (bitField0_ & ~0x00000001); arg_ = getDefaultInstance().getArg(); onChanged(); return this; } /** * required string arg = 1; */ public Builder setArgBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; arg_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RestoreFailedStorageRequestProto) } static { defaultInstance = new RestoreFailedStorageRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RestoreFailedStorageRequestProto) } public interface RestoreFailedStorageResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required bool result = 1; /** * required bool result = 1; */ boolean hasResult(); /** * required bool 
result = 1; */ boolean getResult(); } /** * Protobuf type {@code hadoop.hdfs.RestoreFailedStorageResponseProto} */ public static final class RestoreFailedStorageResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements RestoreFailedStorageResponseProtoOrBuilder { // Use RestoreFailedStorageResponseProto.newBuilder() to construct. private RestoreFailedStorageResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RestoreFailedStorageResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RestoreFailedStorageResponseProto defaultInstance; public static RestoreFailedStorageResponseProto getDefaultInstance() { return defaultInstance; } public RestoreFailedStorageResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RestoreFailedStorageResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; result_ = input.readBool(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RestoreFailedStorageResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RestoreFailedStorageResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public RestoreFailedStorageResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new RestoreFailedStorageResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required bool result = 1; public static final int RESULT_FIELD_NUMBER = 1; private boolean result_; /** * required bool result = 1; */ public boolean hasResult() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool result = 1; */ public boolean getResult() { return result_; } private void initFields() { result_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasResult()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBool(1, result_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(1, result_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto) obj; boolean result = true; result = result && (hasResult() == other.hasResult()); if (hasResult()) { result = result && (getResult() == other.getResult()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasResult()) { hash = (37 * hash) + RESULT_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getResult()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RestoreFailedStorageResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProtoOrBuilder { public static final 
io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RestoreFailedStorageResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RestoreFailedStorageResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); result_ = false; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RestoreFailedStorageResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.result_ = result_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto other) { if (other == 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.getDefaultInstance()) return this; if (other.hasResult()) { setResult(other.getResult()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasResult()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required bool result = 1; private boolean result_ ; /** * required bool result = 1; */ public boolean hasResult() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool result = 1; */ public boolean getResult() { return result_; } /** * required bool result = 1; */ public Builder setResult(boolean value) { bitField0_ |= 0x00000001; result_ = value; onChanged(); return this; } /** * required bool result = 1; */ public Builder clearResult() { bitField0_ = (bitField0_ & ~0x00000001); result_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RestoreFailedStorageResponseProto) } static { defaultInstance = new RestoreFailedStorageResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RestoreFailedStorageResponseProto) } public interface RefreshNodesRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.RefreshNodesRequestProto} * *
   * <pre>
   * no parameters
   * </pre>
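   *
   * <p>Editor's note (not generated code): since this request carries no
   * fields, callers typically use
   * {@code RefreshNodesRequestProto.getDefaultInstance()} or
   * {@code RefreshNodesRequestProto.newBuilder().build()} rather than
   * setting anything.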
*/ public static final class RefreshNodesRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements RefreshNodesRequestProtoOrBuilder { // Use RefreshNodesRequestProto.newBuilder() to construct. private RefreshNodesRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RefreshNodesRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RefreshNodesRequestProto defaultInstance; public static RefreshNodesRequestProto getDefaultInstance() { return defaultInstance; } public RefreshNodesRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RefreshNodesRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNodesRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNodesRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public RefreshNodesRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new RefreshNodesRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { 
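      // The static parseFrom/parseDelimitedFrom helpers in this class all
      // delegate to this shared PARSER instance.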
return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return 
PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RefreshNodesRequestProto} * *
     * <pre>
     * no parameters
     * </pre>
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNodesRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNodesRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNodesRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; 
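      // Nothing else to copy here: RefreshNodesRequestProto declares no fields, so a
      // merge only needs to carry over the other message's unknown fields (done above).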
} public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RefreshNodesRequestProto) } static { defaultInstance = new RefreshNodesRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RefreshNodesRequestProto) } public interface RefreshNodesResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.RefreshNodesResponseProto} * *
   * <pre>
   * void response
   * </pre>
*/ public static final class RefreshNodesResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements RefreshNodesResponseProtoOrBuilder { // Use RefreshNodesResponseProto.newBuilder() to construct. private RefreshNodesResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RefreshNodesResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RefreshNodesResponseProto defaultInstance; public static RefreshNodesResponseProto getDefaultInstance() { return defaultInstance; } public RefreshNodesResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RefreshNodesResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNodesResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNodesResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public RefreshNodesResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new RefreshNodesResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser 
getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RefreshNodesResponseProto} * *
     * <pre>
     * void response
     * </pre>
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNodesResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNodesResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNodesResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.getDefaultInstance()) return this; 
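      // Past the default-instance guard, the only state a field-less message can
      // contribute is its unknown-field set, merged below.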
this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RefreshNodesResponseProto) } static { defaultInstance = new RefreshNodesResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RefreshNodesResponseProto) } public interface FinalizeUpgradeRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.FinalizeUpgradeRequestProto} * *
   * <pre>
   * no parameters
   * </pre>
*/ public static final class FinalizeUpgradeRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements FinalizeUpgradeRequestProtoOrBuilder { // Use FinalizeUpgradeRequestProto.newBuilder() to construct. private FinalizeUpgradeRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private FinalizeUpgradeRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final FinalizeUpgradeRequestProto defaultInstance; public static FinalizeUpgradeRequestProto getDefaultInstance() { return defaultInstance; } public FinalizeUpgradeRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private FinalizeUpgradeRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_FinalizeUpgradeRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_FinalizeUpgradeRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public FinalizeUpgradeRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new FinalizeUpgradeRequestProto(input, extensionRegistry); } }; @java.lang.Override public 
io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto parseFrom( java.io.InputStream input, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.FinalizeUpgradeRequestProto} * *
     * <pre>
     * no parameters
     * </pre>
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_FinalizeUpgradeRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_FinalizeUpgradeRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_FinalizeUpgradeRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto.getDefaultInstance()) return this; 
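      // FinalizeUpgradeRequestProto also declares no fields, so merging reduces to
      // folding in the other message's unknown fields.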
this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FinalizeUpgradeRequestProto) } static { defaultInstance = new FinalizeUpgradeRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.FinalizeUpgradeRequestProto) } public interface FinalizeUpgradeResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.FinalizeUpgradeResponseProto} * *
   * <pre>
   * void response
   * </pre>
*/ public static final class FinalizeUpgradeResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements FinalizeUpgradeResponseProtoOrBuilder { // Use FinalizeUpgradeResponseProto.newBuilder() to construct. private FinalizeUpgradeResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private FinalizeUpgradeResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final FinalizeUpgradeResponseProto defaultInstance; public static FinalizeUpgradeResponseProto getDefaultInstance() { return defaultInstance; } public FinalizeUpgradeResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private FinalizeUpgradeResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_FinalizeUpgradeResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_FinalizeUpgradeResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public FinalizeUpgradeResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new FinalizeUpgradeResponseProto(input, extensionRegistry); } }; @java.lang.Override public 
io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto parseFrom( java.io.InputStream input, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.FinalizeUpgradeResponseProto} * *
     * <pre>
     * void response
     * </pre>
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_FinalizeUpgradeResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_FinalizeUpgradeResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_FinalizeUpgradeResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.getDefaultInstance()) 
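      // Merging from the shared default instance cannot add any state, so bail out: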
return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FinalizeUpgradeResponseProto) } static { defaultInstance = new FinalizeUpgradeResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.FinalizeUpgradeResponseProto) } public interface UpgradeStatusRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.UpgradeStatusRequestProto} * *
   * <pre>
   * no parameters
   * </pre>
*/ public static final class UpgradeStatusRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements UpgradeStatusRequestProtoOrBuilder { // Use UpgradeStatusRequestProto.newBuilder() to construct. private UpgradeStatusRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private UpgradeStatusRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final UpgradeStatusRequestProto defaultInstance; public static UpgradeStatusRequestProto getDefaultInstance() { return defaultInstance; } public UpgradeStatusRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private UpgradeStatusRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpgradeStatusRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpgradeStatusRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public UpgradeStatusRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new UpgradeStatusRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser 
getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.UpgradeStatusRequestProto} * *
     * no parameters
     *
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpgradeStatusRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpgradeStatusRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpgradeStatusRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto.getDefaultInstance()) return this; 
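/*
 * A minimal round-trip sketch for the parameterless request message above. The harness
 * class is illustrative and not part of the generated file; only the generated
 * UpgradeStatusRequestProto API shown here and the shaded protobuf runtime are assumed.
 */
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto;

class UpgradeStatusRequestRoundTrip {
  public static void main(String[] args) throws Exception {
    // No fields to set: the builder goes straight to build().
    UpgradeStatusRequestProto request = UpgradeStatusRequestProto.newBuilder().build();
    byte[] wire = request.toByteArray();            // an empty message encodes to zero bytes
    UpgradeStatusRequestProto parsed = UpgradeStatusRequestProto.parseFrom(wire);
    System.out.println(wire.length);                // 0
    System.out.println(parsed.equals(request));     // true: equals() compares only unknown fields here
  }
}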
this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.UpgradeStatusRequestProto) } static { defaultInstance = new UpgradeStatusRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.UpgradeStatusRequestProto) } public interface UpgradeStatusResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required bool upgradeFinalized = 1; /** * required bool upgradeFinalized = 1; */ boolean hasUpgradeFinalized(); /** * required bool upgradeFinalized = 1; */ boolean getUpgradeFinalized(); } /** * Protobuf type {@code hadoop.hdfs.UpgradeStatusResponseProto} */ public static final class UpgradeStatusResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements UpgradeStatusResponseProtoOrBuilder { // Use UpgradeStatusResponseProto.newBuilder() to construct. private UpgradeStatusResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private UpgradeStatusResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final UpgradeStatusResponseProto defaultInstance; public static UpgradeStatusResponseProto getDefaultInstance() { return defaultInstance; } public UpgradeStatusResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private UpgradeStatusResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; upgradeFinalized_ = input.readBool(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch 
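/*
 * The hand-written parse loops in this file dispatch on raw wire tags: a tag packs the
 * field number and wire type as (fieldNumber << 3) | wireType. A sketch of that
 * arithmetic, assuming only the standard protobuf encoding rules:
 */
class WireTagSketch {
  // Wire types: 0 = varint, 1 = 64-bit, 2 = length-delimited, 5 = 32-bit.
  static int tag(int fieldNumber, int wireType) { return (fieldNumber << 3) | wireType; }

  public static void main(String[] args) {
    System.out.println(tag(1, 0)); // 8  -> "case 8": field 1 as varint (e.g. upgradeFinalized)
    System.out.println(tag(1, 2)); // 10 -> "case 10": field 1 as embedded message (e.g. status)
    System.out.println(tag(2, 0)); // 16 -> "case 16": startTime
    System.out.println(tag(3, 0)); // 24 -> "case 24": finalizeTime
    System.out.println(tag(4, 0)); // 32 -> "case 32": createdRollbackImages
  }
}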
(java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpgradeStatusResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpgradeStatusResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public UpgradeStatusResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new UpgradeStatusResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required bool upgradeFinalized = 1; public static final int UPGRADEFINALIZED_FIELD_NUMBER = 1; private boolean upgradeFinalized_; /** * required bool upgradeFinalized = 1; */ public boolean hasUpgradeFinalized() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool upgradeFinalized = 1; */ public boolean getUpgradeFinalized() { return upgradeFinalized_; } private void initFields() { upgradeFinalized_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasUpgradeFinalized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBool(1, upgradeFinalized_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(1, upgradeFinalized_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto)) { return super.equals(obj); } 
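/*
 * upgradeFinalized is a required field, so build() refuses an unset builder while
 * buildPartial() does not; this mirrors the isInitialized() logic above. A sketch
 * (harness class illustrative, generated API only):
 */
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto;

class RequiredFieldSketch {
  public static void main(String[] args) {
    try {
      UpgradeStatusResponseProto.newBuilder().build();        // throws: required field unset
    } catch (RuntimeException expected) {
      System.out.println("uninitialized: " + expected.getMessage());
    }
    UpgradeStatusResponseProto partial = UpgradeStatusResponseProto.newBuilder().buildPartial();
    System.out.println(partial.isInitialized());              // false, but the object is usable
    UpgradeStatusResponseProto ok =
        UpgradeStatusResponseProto.newBuilder().setUpgradeFinalized(true).build();
    System.out.println(ok.getUpgradeFinalized());             // true
  }
}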
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto) obj; boolean result = true; result = result && (hasUpgradeFinalized() == other.hasUpgradeFinalized()); if (hasUpgradeFinalized()) { result = result && (getUpgradeFinalized() == other.getUpgradeFinalized()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasUpgradeFinalized()) { hash = (37 * hash) + UPGRADEFINALIZED_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getUpgradeFinalized()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto parseFrom( 
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.UpgradeStatusResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpgradeStatusResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpgradeStatusResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); upgradeFinalized_ = false; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpgradeStatusResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto build() { 
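/*
 * toBuilder() and the static newBuilder(prototype) above are equivalent: both seed a
 * fresh builder by merging in an existing message, which is the idiomatic way to
 * copy-and-modify an immutable proto. A short sketch:
 */
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto;

class CopyAndModifySketch {
  public static void main(String[] args) {
    UpgradeStatusResponseProto original =
        UpgradeStatusResponseProto.newBuilder().setUpgradeFinalized(false).build();
    UpgradeStatusResponseProto flipped = original.toBuilder().setUpgradeFinalized(true).build();
    System.out.println(original.getUpgradeFinalized());       // false: messages are immutable
    System.out.println(flipped.getUpgradeFinalized());        // true
  }
}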
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.upgradeFinalized_ = upgradeFinalized_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto.getDefaultInstance()) return this; if (other.hasUpgradeFinalized()) { setUpgradeFinalized(other.getUpgradeFinalized()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasUpgradeFinalized()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required bool upgradeFinalized = 1; private boolean upgradeFinalized_ ; /** * required bool upgradeFinalized = 1; */ public boolean hasUpgradeFinalized() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool upgradeFinalized = 1; */ public boolean getUpgradeFinalized() { return upgradeFinalized_; } /** * required bool upgradeFinalized = 1; */ public Builder setUpgradeFinalized(boolean value) { bitField0_ |= 0x00000001; upgradeFinalized_ = value; onChanged(); return this; } /** * required bool upgradeFinalized = 1; */ public Builder clearUpgradeFinalized() { bitField0_ = (bitField0_ & ~0x00000001); upgradeFinalized_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.UpgradeStatusResponseProto) } static { defaultInstance = new UpgradeStatusResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.UpgradeStatusResponseProto) } public interface RollingUpgradeRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required 
.hadoop.hdfs.RollingUpgradeActionProto action = 1; /** * required .hadoop.hdfs.RollingUpgradeActionProto action = 1; */ boolean hasAction(); /** * required .hadoop.hdfs.RollingUpgradeActionProto action = 1; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto getAction(); } /** * Protobuf type {@code hadoop.hdfs.RollingUpgradeRequestProto} */ public static final class RollingUpgradeRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements RollingUpgradeRequestProtoOrBuilder { // Use RollingUpgradeRequestProto.newBuilder() to construct. private RollingUpgradeRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RollingUpgradeRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RollingUpgradeRequestProto defaultInstance; public static RollingUpgradeRequestProto getDefaultInstance() { return defaultInstance; } public RollingUpgradeRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RollingUpgradeRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto value = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { bitField0_ |= 0x00000001; action_ = value; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollingUpgradeRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollingUpgradeRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public RollingUpgradeRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new RollingUpgradeRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.RollingUpgradeActionProto action = 1; public static final int ACTION_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto action_; /** * required .hadoop.hdfs.RollingUpgradeActionProto action = 1; */ public boolean hasAction() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.RollingUpgradeActionProto action = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto getAction() { return action_; } private void initFields() { action_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto.QUERY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasAction()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeEnum(1, action_.getNumber()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeEnumSize(1, action_.getNumber()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto) obj; boolean result = true; result = result && (hasAction() == other.hasAction()); if (hasAction()) { result = result && (getAction() == other.getAction()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 
0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasAction()) { hash = (37 * hash) + ACTION_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getAction()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder 
newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RollingUpgradeRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollingUpgradeRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollingUpgradeRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); action_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto.QUERY; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollingUpgradeRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if 
(((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.action_ = action_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto.getDefaultInstance()) return this; if (other.hasAction()) { setAction(other.getAction()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasAction()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.RollingUpgradeActionProto action = 1; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto action_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto.QUERY; /** * required .hadoop.hdfs.RollingUpgradeActionProto action = 1; */ public boolean hasAction() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.RollingUpgradeActionProto action = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto getAction() { return action_; } /** * required .hadoop.hdfs.RollingUpgradeActionProto action = 1; */ public Builder setAction(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; action_ = value; onChanged(); return this; } /** * required .hadoop.hdfs.RollingUpgradeActionProto action = 1; */ public Builder clearAction() { bitField0_ = (bitField0_ & ~0x00000001); action_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto.QUERY; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RollingUpgradeRequestProto) } static { defaultInstance = new RollingUpgradeRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RollingUpgradeRequestProto) } public interface RollingUpgradeInfoProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.RollingUpgradeStatusProto status = 1; /** * required .hadoop.hdfs.RollingUpgradeStatusProto status = 1; */ boolean 
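/*
 * RollingUpgradeRequestProto carries a single required enum, defaulting to QUERY. Note
 * the parse loop above: an unrecognized enum number is not an error but is preserved as
 * an unknown varint field (the unknownFields.mergeVarintField branch). A usage sketch:
 */
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto;

class RollingUpgradeRequestSketch {
  public static void main(String[] args) throws Exception {
    RollingUpgradeRequestProto query = RollingUpgradeRequestProto.newBuilder()
        .setAction(RollingUpgradeActionProto.QUERY)           // setAction(null) would throw NPE
        .build();
    RollingUpgradeRequestProto parsed = RollingUpgradeRequestProto.parseFrom(query.toByteArray());
    System.out.println(parsed.hasAction());                   // true
    System.out.println(parsed.getAction());                   // QUERY
  }
}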
hasStatus(); /** * required .hadoop.hdfs.RollingUpgradeStatusProto status = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getStatus(); /** * required .hadoop.hdfs.RollingUpgradeStatusProto status = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder getStatusOrBuilder(); // required uint64 startTime = 2; /** * required uint64 startTime = 2; */ boolean hasStartTime(); /** * required uint64 startTime = 2; */ long getStartTime(); // required uint64 finalizeTime = 3; /** * required uint64 finalizeTime = 3; */ boolean hasFinalizeTime(); /** * required uint64 finalizeTime = 3; */ long getFinalizeTime(); // required bool createdRollbackImages = 4; /** * required bool createdRollbackImages = 4; */ boolean hasCreatedRollbackImages(); /** * required bool createdRollbackImages = 4; */ boolean getCreatedRollbackImages(); } /** * Protobuf type {@code hadoop.hdfs.RollingUpgradeInfoProto} */ public static final class RollingUpgradeInfoProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements RollingUpgradeInfoProtoOrBuilder { // Use RollingUpgradeInfoProto.newBuilder() to construct. private RollingUpgradeInfoProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RollingUpgradeInfoProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RollingUpgradeInfoProto defaultInstance; public static RollingUpgradeInfoProto getDefaultInstance() { return defaultInstance; } public RollingUpgradeInfoProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RollingUpgradeInfoProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = status_.toBuilder(); } status_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(status_); status_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; startTime_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; finalizeTime_ = input.readUInt64(); break; } case 32: { bitField0_ |= 0x00000008; createdRollbackImages_ = input.readBool(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw 
e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollingUpgradeInfoProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollingUpgradeInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public RollingUpgradeInfoProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new RollingUpgradeInfoProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.RollingUpgradeStatusProto status = 1; public static final int STATUS_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto status_; /** * required .hadoop.hdfs.RollingUpgradeStatusProto status = 1; */ public boolean hasStatus() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.RollingUpgradeStatusProto status = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getStatus() { return status_; } /** * required .hadoop.hdfs.RollingUpgradeStatusProto status = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder getStatusOrBuilder() { return status_; } // required uint64 startTime = 2; public static final int STARTTIME_FIELD_NUMBER = 2; private long startTime_; /** * required uint64 startTime = 2; */ public boolean hasStartTime() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 startTime = 2; */ public long getStartTime() { return startTime_; } // required uint64 finalizeTime = 3; public static final int FINALIZETIME_FIELD_NUMBER = 3; private long finalizeTime_; /** * required uint64 finalizeTime = 3; */ public boolean hasFinalizeTime() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 finalizeTime = 3; */ public long getFinalizeTime() { return finalizeTime_; } // required bool createdRollbackImages = 4; public static final int CREATEDROLLBACKIMAGES_FIELD_NUMBER = 4; private boolean createdRollbackImages_; /** * required bool createdRollbackImages = 4; */ public boolean hasCreatedRollbackImages() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required bool createdRollbackImages = 4; */ public boolean getCreatedRollbackImages() { return createdRollbackImages_; 
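/*
 * RollingUpgradeInfoProto bundles the nested status with the upgrade timestamps; all
 * four fields are required, and isInitialized() also recurses into the nested message.
 * Because RollingUpgradeStatusProto's own required fields are defined elsewhere (in
 * HdfsProtos, not shown here), this sketch uses buildPartial() rather than asserting
 * that build() succeeds:
 */
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto;

class RollingUpgradeInfoSketch {
  public static void main(String[] args) {
    RollingUpgradeInfoProto partial = RollingUpgradeInfoProto.newBuilder()
        .setStatus(RollingUpgradeStatusProto.getDefaultInstance())
        .setStartTime(System.currentTimeMillis())
        .setFinalizeTime(0L)                                  // 0 while the upgrade is still in progress
        .setCreatedRollbackImages(true)
        .buildPartial();
    System.out.println(partial.hasStatus());                  // true
    System.out.println(partial.hasStartTime());               // true
  }
}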
} private void initFields() { status_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance(); startTime_ = 0L; finalizeTime_ = 0L; createdRollbackImages_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasStatus()) { memoizedIsInitialized = 0; return false; } if (!hasStartTime()) { memoizedIsInitialized = 0; return false; } if (!hasFinalizeTime()) { memoizedIsInitialized = 0; return false; } if (!hasCreatedRollbackImages()) { memoizedIsInitialized = 0; return false; } if (!getStatus().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, status_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, startTime_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(3, finalizeTime_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeBool(4, createdRollbackImages_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, status_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(2, startTime_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(3, finalizeTime_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(4, createdRollbackImages_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto) obj; boolean result = true; result = result && (hasStatus() == other.hasStatus()); if (hasStatus()) { result = result && getStatus() .equals(other.getStatus()); } result = result && (hasStartTime() == other.hasStartTime()); if (hasStartTime()) { result = result && (getStartTime() == other.getStartTime()); } result = result && (hasFinalizeTime() == other.hasFinalizeTime()); if (hasFinalizeTime()) { result = result && (getFinalizeTime() == other.getFinalizeTime()); } result = result && (hasCreatedRollbackImages() == other.hasCreatedRollbackImages()); if (hasCreatedRollbackImages()) { result = result && (getCreatedRollbackImages() == other.getCreatedRollbackImages()); } result = result && 
getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasStatus()) { hash = (37 * hash) + STATUS_FIELD_NUMBER; hash = (53 * hash) + getStatus().hashCode(); } if (hasStartTime()) { hash = (37 * hash) + STARTTIME_FIELD_NUMBER; hash = (53 * hash) + hashLong(getStartTime()); } if (hasFinalizeTime()) { hash = (37 * hash) + FINALIZETIME_FIELD_NUMBER; hash = (53 * hash) + hashLong(getFinalizeTime()); } if (hasCreatedRollbackImages()) { hash = (37 * hash) + CREATEDROLLBACKIMAGES_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getCreatedRollbackImages()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RollingUpgradeInfoProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollingUpgradeInfoProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollingUpgradeInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getStatusFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (statusBuilder_ == null) { status_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance(); } else { statusBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); startTime_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); finalizeTime_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); createdRollbackImages_ = false; bitField0_ = (bitField0_ & ~0x00000008); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollingUpgradeInfoProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto getDefaultInstanceForType() { return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (statusBuilder_ == null) { result.status_ = status_; } else { result.status_ = statusBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.startTime_ = startTime_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.finalizeTime_ = finalizeTime_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.createdRollbackImages_ = createdRollbackImages_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto.getDefaultInstance()) return this; if (other.hasStatus()) { mergeStatus(other.getStatus()); } if (other.hasStartTime()) { setStartTime(other.getStartTime()); } if (other.hasFinalizeTime()) { setFinalizeTime(other.getFinalizeTime()); } if (other.hasCreatedRollbackImages()) { setCreatedRollbackImages(other.getCreatedRollbackImages()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasStatus()) { return false; } if (!hasStartTime()) { return false; } if (!hasFinalizeTime()) { return false; } if (!hasCreatedRollbackImages()) { return false; } if (!getStatus().isInitialized()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.RollingUpgradeStatusProto status = 1; private 
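/*
 * For the singular nested message, setStatus() replaces the value wholesale while
 * mergeStatus() field-merges into whatever is already present (the
 * newBuilder(status_).mergeFrom(value).buildPartial() branch below), and
 * getStatusBuilder() flips the field into builder-backed mode for in-place edits.
 * A sketch of the difference, using only the generated API:
 */
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto;

class NestedStatusSketch {
  public static void main(String[] args) {
    RollingUpgradeInfoProto.Builder builder = RollingUpgradeInfoProto.newBuilder();
    builder.setStatus(RollingUpgradeStatusProto.getDefaultInstance());    // wholesale replace
    builder.mergeStatus(RollingUpgradeStatusProto.getDefaultInstance());  // field-by-field merge
    // After getStatusBuilder() the field is tracked through a SingleFieldBuilder.
    RollingUpgradeStatusProto current = builder.getStatusBuilder().buildPartial();
    System.out.println(builder.hasStatus());                  // true
    System.out.println(current.isInitialized());              // depends on HdfsProtos' required fields
  }
}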
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto status_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder> statusBuilder_; /** * required .hadoop.hdfs.RollingUpgradeStatusProto status = 1; */ public boolean hasStatus() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.RollingUpgradeStatusProto status = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getStatus() { if (statusBuilder_ == null) { return status_; } else { return statusBuilder_.getMessage(); } } /** * required .hadoop.hdfs.RollingUpgradeStatusProto status = 1; */ public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto value) { if (statusBuilder_ == null) { if (value == null) { throw new NullPointerException(); } status_ = value; onChanged(); } else { statusBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.RollingUpgradeStatusProto status = 1; */ public Builder setStatus( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder builderForValue) { if (statusBuilder_ == null) { status_ = builderForValue.build(); onChanged(); } else { statusBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.RollingUpgradeStatusProto status = 1; */ public Builder mergeStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto value) { if (statusBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && status_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance()) { status_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.newBuilder(status_).mergeFrom(value).buildPartial(); } else { status_ = value; } onChanged(); } else { statusBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.RollingUpgradeStatusProto status = 1; */ public Builder clearStatus() { if (statusBuilder_ == null) { status_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance(); onChanged(); } else { statusBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.RollingUpgradeStatusProto status = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder getStatusBuilder() { bitField0_ |= 0x00000001; onChanged(); return getStatusFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.RollingUpgradeStatusProto status = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder getStatusOrBuilder() { if (statusBuilder_ != null) { return statusBuilder_.getMessageOrBuilder(); } else { return status_; } } /** * required .hadoop.hdfs.RollingUpgradeStatusProto status = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder> getStatusFieldBuilder() { if (statusBuilder_ == null) { statusBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder>( status_, getParentForChildren(), isClean()); status_ = null; } return statusBuilder_; } // required uint64 startTime = 2; private long startTime_ ; /** * required uint64 startTime = 2; */ public boolean hasStartTime() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 startTime = 2; */ public long getStartTime() { return startTime_; } /** * required uint64 startTime = 2; */ public Builder setStartTime(long value) { bitField0_ |= 0x00000002; startTime_ = value; onChanged(); return this; } /** * required uint64 startTime = 2; */ public Builder clearStartTime() { bitField0_ = (bitField0_ & ~0x00000002); startTime_ = 0L; onChanged(); return this; } // required uint64 finalizeTime = 3; private long finalizeTime_ ; /** * required uint64 finalizeTime = 3; */ public boolean hasFinalizeTime() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 finalizeTime = 3; */ public long getFinalizeTime() { return finalizeTime_; } /** * required uint64 finalizeTime = 3; */ public Builder setFinalizeTime(long value) { bitField0_ |= 0x00000004; finalizeTime_ = value; onChanged(); return this; } /** * required uint64 finalizeTime = 3; */ public Builder clearFinalizeTime() { bitField0_ = (bitField0_ & ~0x00000004); finalizeTime_ = 0L; onChanged(); return this; } // required bool createdRollbackImages = 4; private boolean createdRollbackImages_ ; /** * required bool createdRollbackImages = 4; */ public boolean hasCreatedRollbackImages() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required bool createdRollbackImages = 4; */ public boolean getCreatedRollbackImages() { return createdRollbackImages_; } /** * required bool createdRollbackImages = 4; */ public Builder setCreatedRollbackImages(boolean value) { bitField0_ |= 0x00000008; createdRollbackImages_ = value; onChanged(); return this; } /** * required bool createdRollbackImages = 4; */ public Builder clearCreatedRollbackImages() { bitField0_ = (bitField0_ & ~0x00000008); createdRollbackImages_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RollingUpgradeInfoProto) } static { defaultInstance = new RollingUpgradeInfoProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RollingUpgradeInfoProto) } public interface RollingUpgradeResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // optional .hadoop.hdfs.RollingUpgradeInfoProto rollingUpgradeInfo = 1; /** * optional .hadoop.hdfs.RollingUpgradeInfoProto rollingUpgradeInfo = 1; */ boolean hasRollingUpgradeInfo(); /** * optional .hadoop.hdfs.RollingUpgradeInfoProto rollingUpgradeInfo = 1; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto getRollingUpgradeInfo(); /** * optional .hadoop.hdfs.RollingUpgradeInfoProto rollingUpgradeInfo = 1; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProtoOrBuilder getRollingUpgradeInfoOrBuilder(); } /** * Protobuf type {@code 
hadoop.hdfs.RollingUpgradeResponseProto} */ public static final class RollingUpgradeResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements RollingUpgradeResponseProtoOrBuilder { // Use RollingUpgradeResponseProto.newBuilder() to construct. private RollingUpgradeResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RollingUpgradeResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RollingUpgradeResponseProto defaultInstance; public static RollingUpgradeResponseProto getDefaultInstance() { return defaultInstance; } public RollingUpgradeResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RollingUpgradeResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = rollingUpgradeInfo_.toBuilder(); } rollingUpgradeInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(rollingUpgradeInfo_); rollingUpgradeInfo_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollingUpgradeResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollingUpgradeResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto.class, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public RollingUpgradeResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new RollingUpgradeResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // optional .hadoop.hdfs.RollingUpgradeInfoProto rollingUpgradeInfo = 1; public static final int ROLLINGUPGRADEINFO_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto rollingUpgradeInfo_; /** * optional .hadoop.hdfs.RollingUpgradeInfoProto rollingUpgradeInfo = 1; */ public boolean hasRollingUpgradeInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .hadoop.hdfs.RollingUpgradeInfoProto rollingUpgradeInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto getRollingUpgradeInfo() { return rollingUpgradeInfo_; } /** * optional .hadoop.hdfs.RollingUpgradeInfoProto rollingUpgradeInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProtoOrBuilder getRollingUpgradeInfoOrBuilder() { return rollingUpgradeInfo_; } private void initFields() { rollingUpgradeInfo_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (hasRollingUpgradeInfo()) { if (!getRollingUpgradeInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, rollingUpgradeInfo_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, rollingUpgradeInfo_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto) obj; boolean 
result = true; result = result && (hasRollingUpgradeInfo() == other.hasRollingUpgradeInfo()); if (hasRollingUpgradeInfo()) { result = result && getRollingUpgradeInfo() .equals(other.getRollingUpgradeInfo()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasRollingUpgradeInfo()) { hash = (37 * hash) + ROLLINGUPGRADEINFO_FIELD_NUMBER; hash = (53 * hash) + getRollingUpgradeInfo().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto 
parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RollingUpgradeResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollingUpgradeResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollingUpgradeResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getRollingUpgradeInfoFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (rollingUpgradeInfoBuilder_ == null) { rollingUpgradeInfo_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto.getDefaultInstance(); } else { rollingUpgradeInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RollingUpgradeResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto build() { 
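// Editor's note (illustrative comment, not part of the generated code):
// build() differs from buildPartial() only in that it validates the result.
// A minimal sketch, assuming 'info' is a hypothetical, already-initialized
// RollingUpgradeInfoProto:
//
//   RollingUpgradeResponseProto ok = RollingUpgradeResponseProto.newBuilder()
//       .setRollingUpgradeInfo(info) // optional field; if set, it must itself be initialized
//       .build();                    // throws if isInitialized() returns false
//   RollingUpgradeResponseProto loose = RollingUpgradeResponseProto.newBuilder()
//       .buildPartial();             // skips the required-field check entirely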
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (rollingUpgradeInfoBuilder_ == null) { result.rollingUpgradeInfo_ = rollingUpgradeInfo_; } else { result.rollingUpgradeInfo_ = rollingUpgradeInfoBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto.getDefaultInstance()) return this; if (other.hasRollingUpgradeInfo()) { mergeRollingUpgradeInfo(other.getRollingUpgradeInfo()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (hasRollingUpgradeInfo()) { if (!getRollingUpgradeInfo().isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // optional .hadoop.hdfs.RollingUpgradeInfoProto rollingUpgradeInfo = 1; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto rollingUpgradeInfo_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProtoOrBuilder> rollingUpgradeInfoBuilder_; /** * optional .hadoop.hdfs.RollingUpgradeInfoProto rollingUpgradeInfo = 1; */ public boolean hasRollingUpgradeInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional 
.hadoop.hdfs.RollingUpgradeInfoProto rollingUpgradeInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto getRollingUpgradeInfo() { if (rollingUpgradeInfoBuilder_ == null) { return rollingUpgradeInfo_; } else { return rollingUpgradeInfoBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.RollingUpgradeInfoProto rollingUpgradeInfo = 1; */ public Builder setRollingUpgradeInfo(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto value) { if (rollingUpgradeInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } rollingUpgradeInfo_ = value; onChanged(); } else { rollingUpgradeInfoBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.RollingUpgradeInfoProto rollingUpgradeInfo = 1; */ public Builder setRollingUpgradeInfo( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto.Builder builderForValue) { if (rollingUpgradeInfoBuilder_ == null) { rollingUpgradeInfo_ = builderForValue.build(); onChanged(); } else { rollingUpgradeInfoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.RollingUpgradeInfoProto rollingUpgradeInfo = 1; */ public Builder mergeRollingUpgradeInfo(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto value) { if (rollingUpgradeInfoBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && rollingUpgradeInfo_ != org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto.getDefaultInstance()) { rollingUpgradeInfo_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto.newBuilder(rollingUpgradeInfo_).mergeFrom(value).buildPartial(); } else { rollingUpgradeInfo_ = value; } onChanged(); } else { rollingUpgradeInfoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.RollingUpgradeInfoProto rollingUpgradeInfo = 1; */ public Builder clearRollingUpgradeInfo() { if (rollingUpgradeInfoBuilder_ == null) { rollingUpgradeInfo_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto.getDefaultInstance(); onChanged(); } else { rollingUpgradeInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * optional .hadoop.hdfs.RollingUpgradeInfoProto rollingUpgradeInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto.Builder getRollingUpgradeInfoBuilder() { bitField0_ |= 0x00000001; onChanged(); return getRollingUpgradeInfoFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.RollingUpgradeInfoProto rollingUpgradeInfo = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProtoOrBuilder getRollingUpgradeInfoOrBuilder() { if (rollingUpgradeInfoBuilder_ != null) { return rollingUpgradeInfoBuilder_.getMessageOrBuilder(); } else { return rollingUpgradeInfo_; } } /** * optional .hadoop.hdfs.RollingUpgradeInfoProto rollingUpgradeInfo = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProtoOrBuilder> 
getRollingUpgradeInfoFieldBuilder() { if (rollingUpgradeInfoBuilder_ == null) { rollingUpgradeInfoBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProtoOrBuilder>( rollingUpgradeInfo_, getParentForChildren(), isClean()); rollingUpgradeInfo_ = null; } return rollingUpgradeInfoBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RollingUpgradeResponseProto) } static { defaultInstance = new RollingUpgradeResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RollingUpgradeResponseProto) } public interface ListCorruptFileBlocksRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string path = 1; /** * required string path = 1; */ boolean hasPath(); /** * required string path = 1; */ java.lang.String getPath(); /** * required string path = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes(); // optional string cookie = 2; /** * optional string cookie = 2; */ boolean hasCookie(); /** * optional string cookie = 2; */ java.lang.String getCookie(); /** * optional string cookie = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getCookieBytes(); } /** * Protobuf type {@code hadoop.hdfs.ListCorruptFileBlocksRequestProto} */ public static final class ListCorruptFileBlocksRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements ListCorruptFileBlocksRequestProtoOrBuilder { // Use ListCorruptFileBlocksRequestProto.newBuilder() to construct. 
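// Editor's note (illustrative comment, not part of the generated code):
// a minimal sketch of building this request via the builder, as the comment
// above suggests. 'path' is the only required field; 'cookie' is the opaque
// continuation token from a previous response when paging through results.
// The literal values here are hypothetical.
//
//   ListCorruptFileBlocksRequestProto req =
//       ListCorruptFileBlocksRequestProto.newBuilder()
//           .setPath("/user/warehouse") // required string path = 1
//           .setCookie("0")             // optional string cookie = 2
//           .build();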
private ListCorruptFileBlocksRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ListCorruptFileBlocksRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ListCorruptFileBlocksRequestProto defaultInstance; public static ListCorruptFileBlocksRequestProto getDefaultInstance() { return defaultInstance; } public ListCorruptFileBlocksRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ListCorruptFileBlocksRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; path_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; cookie_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCorruptFileBlocksRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCorruptFileBlocksRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public ListCorruptFileBlocksRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new ListCorruptFileBlocksRequestProto(input, extensionRegistry); } }; @java.lang.Override public 
io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string path = 1; public static final int PATH_FIELD_NUMBER = 1; private java.lang.Object path_; /** * required string path = 1; */ public boolean hasPath() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string path = 1; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { path_ = s; } return s; } } /** * required string path = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // optional string cookie = 2; public static final int COOKIE_FIELD_NUMBER = 2; private java.lang.Object cookie_; /** * optional string cookie = 2; */ public boolean hasCookie() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string cookie = 2; */ public java.lang.String getCookie() { java.lang.Object ref = cookie_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { cookie_ = s; } return s; } } /** * optional string cookie = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getCookieBytes() { java.lang.Object ref = cookie_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); cookie_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { path_ = ""; cookie_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasPath()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getPathBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getCookieBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getPathBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(2, getCookieBytes()); } size += getUnknownFields().getSerializedSize(); 
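// The size just computed is memoized below; messages are immutable, so
// later writeTo() calls can reuse it without re-walking the fields.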
memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto) obj; boolean result = true; result = result && (hasPath() == other.hasPath()); if (hasPath()) { result = result && getPath() .equals(other.getPath()); } result = result && (hasCookie() == other.hasCookie()); if (hasCookie()) { result = result && getCookie() .equals(other.getCookie()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasPath()) { hash = (37 * hash) + PATH_FIELD_NUMBER; hash = (53 * hash) + getPath().hashCode(); } if (hasCookie()) { hash = (37 * hash) + COOKIE_FIELD_NUMBER; hash = (53 * hash) + getCookie().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ListCorruptFileBlocksRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCorruptFileBlocksRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCorruptFileBlocksRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); path_ = ""; bitField0_ = (bitField0_ & ~0x00000001); 
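// clear() restores each field's proto2 default ("" for strings) and drops
// its presence bit from bitField0_; the optional cookie field follows the
// same reset pattern below.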
cookie_ = ""; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCorruptFileBlocksRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.path_ = path_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.cookie_ = cookie_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto.getDefaultInstance()) return this; if (other.hasPath()) { bitField0_ |= 0x00000001; path_ = other.path_; onChanged(); } if (other.hasCookie()) { bitField0_ |= 0x00000002; cookie_ = other.cookie_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasPath()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string path = 1; private java.lang.Object path_ = ""; /** * 
required string path = 1; */ public boolean hasPath() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string path = 1; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); path_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string path = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string path = 1; */ public Builder setPath( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; path_ = value; onChanged(); return this; } /** * required string path = 1; */ public Builder clearPath() { bitField0_ = (bitField0_ & ~0x00000001); path_ = getDefaultInstance().getPath(); onChanged(); return this; } /** * required string path = 1; */ public Builder setPathBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; path_ = value; onChanged(); return this; } // optional string cookie = 2; private java.lang.Object cookie_ = ""; /** * optional string cookie = 2; */ public boolean hasCookie() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string cookie = 2; */ public java.lang.String getCookie() { java.lang.Object ref = cookie_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); cookie_ = s; return s; } else { return (java.lang.String) ref; } } /** * optional string cookie = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getCookieBytes() { java.lang.Object ref = cookie_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); cookie_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * optional string cookie = 2; */ public Builder setCookie( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; cookie_ = value; onChanged(); return this; } /** * optional string cookie = 2; */ public Builder clearCookie() { bitField0_ = (bitField0_ & ~0x00000002); cookie_ = getDefaultInstance().getCookie(); onChanged(); return this; } /** * optional string cookie = 2; */ public Builder setCookieBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; cookie_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ListCorruptFileBlocksRequestProto) } static { defaultInstance = new ListCorruptFileBlocksRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ListCorruptFileBlocksRequestProto) } public interface ListCorruptFileBlocksResponseProtoOrBuilder extends 
io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.CorruptFileBlocksProto corrupt = 1; /** * required .hadoop.hdfs.CorruptFileBlocksProto corrupt = 1; */ boolean hasCorrupt(); /** * required .hadoop.hdfs.CorruptFileBlocksProto corrupt = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto getCorrupt(); /** * required .hadoop.hdfs.CorruptFileBlocksProto corrupt = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProtoOrBuilder getCorruptOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.ListCorruptFileBlocksResponseProto} */ public static final class ListCorruptFileBlocksResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements ListCorruptFileBlocksResponseProtoOrBuilder { // Use ListCorruptFileBlocksResponseProto.newBuilder() to construct. private ListCorruptFileBlocksResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ListCorruptFileBlocksResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ListCorruptFileBlocksResponseProto defaultInstance; public static ListCorruptFileBlocksResponseProto getDefaultInstance() { return defaultInstance; } public ListCorruptFileBlocksResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ListCorruptFileBlocksResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = corrupt_.toBuilder(); } corrupt_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(corrupt_); corrupt_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCorruptFileBlocksResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCorruptFileBlocksResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public ListCorruptFileBlocksResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new ListCorruptFileBlocksResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.CorruptFileBlocksProto corrupt = 1; public static final int CORRUPT_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto corrupt_; /** * required .hadoop.hdfs.CorruptFileBlocksProto corrupt = 1; */ public boolean hasCorrupt() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.CorruptFileBlocksProto corrupt = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto getCorrupt() { return corrupt_; } /** * required .hadoop.hdfs.CorruptFileBlocksProto corrupt = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProtoOrBuilder getCorruptOrBuilder() { return corrupt_; } private void initFields() { corrupt_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasCorrupt()) { memoizedIsInitialized = 0; return false; } if (!getCorrupt().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, corrupt_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, corrupt_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { 
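// Identity fast path: a message is trivially equal to itself, so the
// field-by-field comparison further below is skipped.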
return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto) obj; boolean result = true; result = result && (hasCorrupt() == other.hasCorrupt()); if (hasCorrupt()) { result = result && getCorrupt() .equals(other.getCorrupt()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasCorrupt()) { hash = (37 * hash) + CORRUPT_FIELD_NUMBER; hash = (53 * hash) + getCorrupt().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } 
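    // Usage sketch (illustrative, not part of the generated file): the static
    // parsers above decode a response outside of the normal RPC stub path.
    // The InputStream "in" is a hypothetical source of a length-delimited
    // message, e.g. one produced with writeDelimitedTo(OutputStream).
    //
    //   ListCorruptFileBlocksResponseProto resp =
    //       ListCorruptFileBlocksResponseProto.parseDelimitedFrom(in);
    //   if (resp.hasCorrupt()) {
    //     // the required CorruptFileBlocksProto carries the corrupt file list
    //     // plus a cookie for fetching the next batch of results
    //     System.out.println(resp.getCorrupt());
    //   }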
public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ListCorruptFileBlocksResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCorruptFileBlocksResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCorruptFileBlocksResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getCorruptFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (corruptBuilder_ == null) { corrupt_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance(); } else { corruptBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCorruptFileBlocksResponseProto_descriptor; } public 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (corruptBuilder_ == null) { result.corrupt_ = corrupt_; } else { result.corrupt_ = corruptBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.getDefaultInstance()) return this; if (other.hasCorrupt()) { mergeCorrupt(other.getCorrupt()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasCorrupt()) { return false; } if (!getCorrupt().isInitialized()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.CorruptFileBlocksProto corrupt = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto corrupt_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProtoOrBuilder> corruptBuilder_; /** * required .hadoop.hdfs.CorruptFileBlocksProto corrupt = 1; */ public boolean hasCorrupt() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.CorruptFileBlocksProto corrupt = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto getCorrupt() { if (corruptBuilder_ == null) { return corrupt_; } else { return corruptBuilder_.getMessage(); } } /** * required .hadoop.hdfs.CorruptFileBlocksProto corrupt = 1; */ public Builder setCorrupt(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto value) { if (corruptBuilder_ == null) { if (value == null) { throw new NullPointerException(); } corrupt_ = value; onChanged(); } else { corruptBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CorruptFileBlocksProto corrupt = 1; */ public Builder setCorrupt( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder builderForValue) { if (corruptBuilder_ == null) { corrupt_ = builderForValue.build(); onChanged(); } else { corruptBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CorruptFileBlocksProto corrupt = 1; */ public Builder mergeCorrupt(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto value) { if (corruptBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && corrupt_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance()) { corrupt_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.newBuilder(corrupt_).mergeFrom(value).buildPartial(); } else { corrupt_ = value; } onChanged(); } else { corruptBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CorruptFileBlocksProto corrupt = 1; */ public Builder clearCorrupt() { if (corruptBuilder_ == null) { corrupt_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance(); onChanged(); } else { corruptBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.CorruptFileBlocksProto corrupt = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder getCorruptBuilder() { bitField0_ |= 0x00000001; onChanged(); return getCorruptFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.CorruptFileBlocksProto corrupt = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProtoOrBuilder getCorruptOrBuilder() { if (corruptBuilder_ != null) { return corruptBuilder_.getMessageOrBuilder(); } else { return corrupt_; } } /** * required .hadoop.hdfs.CorruptFileBlocksProto corrupt = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProtoOrBuilder> getCorruptFieldBuilder() { if (corruptBuilder_ == null) { corruptBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProtoOrBuilder>( 
corrupt_, getParentForChildren(), isClean()); corrupt_ = null; } return corruptBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ListCorruptFileBlocksResponseProto) } static { defaultInstance = new ListCorruptFileBlocksResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ListCorruptFileBlocksResponseProto) } public interface MetaSaveRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string filename = 1; /** * required string filename = 1; */ boolean hasFilename(); /** * required string filename = 1; */ java.lang.String getFilename(); /** * required string filename = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getFilenameBytes(); } /** * Protobuf type {@code hadoop.hdfs.MetaSaveRequestProto} */ public static final class MetaSaveRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements MetaSaveRequestProtoOrBuilder { // Use MetaSaveRequestProto.newBuilder() to construct. private MetaSaveRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private MetaSaveRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final MetaSaveRequestProto defaultInstance; public static MetaSaveRequestProto getDefaultInstance() { return defaultInstance; } public MetaSaveRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private MetaSaveRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; filename_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_MetaSaveRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_MetaSaveRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public MetaSaveRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new MetaSaveRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string filename = 1; public static final int FILENAME_FIELD_NUMBER = 1; private java.lang.Object filename_; /** * required string filename = 1; */ public boolean hasFilename() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string filename = 1; */ public java.lang.String getFilename() { java.lang.Object ref = filename_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { filename_ = s; } return s; } } /** * required string filename = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getFilenameBytes() { java.lang.Object ref = filename_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); filename_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { filename_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasFilename()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getFilenameBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getFilenameBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto) obj; boolean result = true; result = result && (hasFilename() == other.hasFilename()); if (hasFilename()) { result = result && getFilename() .equals(other.getFilename()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasFilename()) { hash = (37 * hash) + FILENAME_FIELD_NUMBER; hash = (53 * hash) + getFilename().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) 
throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.MetaSaveRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_MetaSaveRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_MetaSaveRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); filename_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_MetaSaveRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return 
result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.filename_ = filename_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto.getDefaultInstance()) return this; if (other.hasFilename()) { bitField0_ |= 0x00000001; filename_ = other.filename_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasFilename()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string filename = 1; private java.lang.Object filename_ = ""; /** * required string filename = 1; */ public boolean hasFilename() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string filename = 1; */ public java.lang.String getFilename() { java.lang.Object ref = filename_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); filename_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string filename = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getFilenameBytes() { java.lang.Object ref = filename_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); filename_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string filename = 1; */ public Builder setFilename( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; filename_ = value; onChanged(); return this; } /** * required string filename = 1; */ public Builder clearFilename() { bitField0_ = (bitField0_ & ~0x00000001); filename_ = 
getDefaultInstance().getFilename();
        onChanged();
        return this;
      }
      /**
       * required string filename = 1;
       */
      public Builder setFilenameBytes(
          io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        filename_ = value;
        onChanged();
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.MetaSaveRequestProto)
    }

    static {
      defaultInstance = new MetaSaveRequestProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.MetaSaveRequestProto)
  }

  public interface MetaSaveResponseProtoOrBuilder
      extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {
  }
  /**
   * Protobuf type {@code hadoop.hdfs.MetaSaveResponseProto}
   *
   * <pre>
   * void response
   * 
*/ public static final class MetaSaveResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements MetaSaveResponseProtoOrBuilder { // Use MetaSaveResponseProto.newBuilder() to construct. private MetaSaveResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private MetaSaveResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final MetaSaveResponseProto defaultInstance; public static MetaSaveResponseProto getDefaultInstance() { return defaultInstance; } public MetaSaveResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private MetaSaveResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_MetaSaveResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_MetaSaveResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public MetaSaveResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new MetaSaveResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { 
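      // MetaSaveResponseProto declares no fields, so there is nothing to
      // initialize here; the message exists only to acknowledge the RPC.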
} private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto parseFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.MetaSaveResponseProto}
     *
     * <pre>
     * void response
     * 
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_MetaSaveResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_MetaSaveResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_MetaSaveResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; 
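        // Illustrative sketch only: building the request message that pairs
        // with this empty response. The file name "metasave.txt" is
        // hypothetical; the NameNode writes the report under its own log
        // directory.
        //
        //   MetaSaveRequestProto req = MetaSaveRequestProto.newBuilder()
        //       .setFilename("metasave.txt")
        //       .build();  // throws if the required filename is unset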
} public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.MetaSaveResponseProto) } static { defaultInstance = new MetaSaveResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.MetaSaveResponseProto) } public interface GetFileInfoRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; */ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); } /** * Protobuf type {@code hadoop.hdfs.GetFileInfoRequestProto} */ public static final class GetFileInfoRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetFileInfoRequestProtoOrBuilder { // Use GetFileInfoRequestProto.newBuilder() to construct. private GetFileInfoRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetFileInfoRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetFileInfoRequestProto defaultInstance; public static GetFileInfoRequestProto getDefaultInstance() { return defaultInstance; } public GetFileInfoRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetFileInfoRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( 
e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFileInfoRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFileInfoRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetFileInfoRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetFileInfoRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string src = 1; public static final int SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { src_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } size 
+= getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto parseDelimitedFrom( 
java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetFileInfoRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFileInfoRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFileInfoRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFileInfoRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto getDefaultInstanceForType() { 
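        // Usage sketch (illustrative only): setSrc is assumed here by analogy
        // with setFilename above; it is the Builder's setter for the required
        // src field. The path "/tmp/example" is hypothetical.
        //
        //   GetFileInfoRequestProto req = GetFileInfoRequestProto.newBuilder()
        //       .setSrc("/tmp/example")
        //       .build();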
return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** 
* required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetFileInfoRequestProto) } static { defaultInstance = new GetFileInfoRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetFileInfoRequestProto) } public interface GetFileInfoResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ boolean hasFs(); /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getFs(); /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getFsOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.GetFileInfoResponseProto} */ public static final class GetFileInfoResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetFileInfoResponseProtoOrBuilder { // Use GetFileInfoResponseProto.newBuilder() to construct. private GetFileInfoResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetFileInfoResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetFileInfoResponseProto defaultInstance; public static GetFileInfoResponseProto getDefaultInstance() { return defaultInstance; } public GetFileInfoResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetFileInfoResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = fs_.toBuilder(); } fs_ = 
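// Editor's note (not part of the generated source): if field 1 (wire tag 10)
// occurs more than once in the input, the message parsed earlier is captured
// in subBuilder above and merged with the newly read value via mergeFrom()/
// buildPartial(), matching protobuf's merge semantics for embedded messages.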
input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(fs_); fs_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFileInfoResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFileInfoResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetFileInfoResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetFileInfoResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; public static final int FS_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto fs_; /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public boolean hasFs() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getFs() { return fs_; } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getFsOrBuilder() { return fs_; } private void initFields() { fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (hasFs()) { if (!getFs().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, fs_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) 
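// Editor's note (not part of the generated source): fast path; the serialized
// size was computed by an earlier call and memoized, so it is returned as-is.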
return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, fs_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto) obj; boolean result = true; result = result && (hasFs() == other.hasFs()); if (hasFs()) { result = result && getFs() .equals(other.getFs()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasFs()) { hash = (37 * hash) + FS_FIELD_NUMBER; hash = (53 * hash) + getFs().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto parseDelimitedFrom(java.io.InputStream input) throws 
java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetFileInfoResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFileInfoResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFileInfoResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getFsFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (fsBuilder_ == null) { fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); } else { fsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public 
io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFileInfoResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (fsBuilder_ == null) { result.fs_ = fs_; } else { result.fs_ = fsBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.getDefaultInstance()) return this; if (other.hasFs()) { mergeFs(other.getFs()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (hasFs()) { if (!getFs().isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> fsBuilder_; /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public boolean hasFs() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getFs() { if (fsBuilder_ == null) { return fs_; } else { return fsBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public Builder setFs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (fsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } fs_ = value; onChanged(); } else { fsBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public Builder setFs( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (fsBuilder_ == null) { fs_ = builderForValue.build(); onChanged(); } else { fsBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public Builder mergeFs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (fsBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && fs_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()) { fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder(fs_).mergeFrom(value).buildPartial(); } else { fs_ = value; } onChanged(); } else { fsBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public Builder clearFs() { if (fsBuilder_ == null) { fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); onChanged(); } else { fsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getFsBuilder() { bitField0_ |= 0x00000001; onChanged(); return getFsFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getFsOrBuilder() { if (fsBuilder_ != null) { return fsBuilder_.getMessageOrBuilder(); } else { return fs_; } } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> getFsFieldBuilder() { if (fsBuilder_ == null) { fsBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>( fs_, getParentForChildren(), isClean()); fs_ = null; } return fsBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetFileInfoResponseProto) } static { defaultInstance = new GetFileInfoResponseProto(true); defaultInstance.initFields(); } // 
@@protoc_insertion_point(class_scope:hadoop.hdfs.GetFileInfoResponseProto) } public interface GetLocatedFileInfoRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // optional string src = 1; /** * optional string src = 1; */ boolean hasSrc(); /** * optional string src = 1; */ java.lang.String getSrc(); /** * optional string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); // optional bool needBlockToken = 2 [default = false]; /** * optional bool needBlockToken = 2 [default = false]; */ boolean hasNeedBlockToken(); /** * optional bool needBlockToken = 2 [default = false]; */ boolean getNeedBlockToken(); } /** * Protobuf type {@code hadoop.hdfs.GetLocatedFileInfoRequestProto} */ public static final class GetLocatedFileInfoRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetLocatedFileInfoRequestProtoOrBuilder { // Use GetLocatedFileInfoRequestProto.newBuilder() to construct. private GetLocatedFileInfoRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetLocatedFileInfoRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetLocatedFileInfoRequestProto defaultInstance; public static GetLocatedFileInfoRequestProto getDefaultInstance() { return defaultInstance; } public GetLocatedFileInfoRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetLocatedFileInfoRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } case 16: { bitField0_ |= 0x00000002; needBlockToken_ = input.readBool(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetLocatedFileInfoRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetLocatedFileInfoRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetLocatedFileInfoRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetLocatedFileInfoRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // optional string src = 1; public static final int SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * optional string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * optional string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // optional bool needBlockToken = 2 [default = false]; public static final int NEEDBLOCKTOKEN_FIELD_NUMBER = 2; private boolean needBlockToken_; /** * optional bool needBlockToken = 2 [default = false]; */ public boolean hasNeedBlockToken() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional bool needBlockToken = 2 [default = false]; */ public boolean getNeedBlockToken() { return needBlockToken_; } private void initFields() { src_ = ""; needBlockToken_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBool(2, needBlockToken_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } if (((bitField0_ 
& 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(2, needBlockToken_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && (hasNeedBlockToken() == other.hasNeedBlockToken()); if (hasNeedBlockToken()) { result = result && (getNeedBlockToken() == other.getNeedBlockToken()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } if (hasNeedBlockToken()) { hash = (37 * hash) + NEEDBLOCKTOKEN_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getNeedBlockToken()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto parseFrom( java.io.InputStream input, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetLocatedFileInfoRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetLocatedFileInfoRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetLocatedFileInfoRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } 
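// Editor's note (not part of the generated source): a minimal usage sketch of
// this builder. Both fields are optional and needBlockToken defaults to false,
// so build() cannot fail on a missing field; the path below is illustrative.
//
//   GetLocatedFileInfoRequestProto req =
//       GetLocatedFileInfoRequestProto.newBuilder()
//           .setSrc("/user/alice/data.txt")   // file to look up (illustrative)
//           .setNeedBlockToken(true)          // also request block access tokens
//           .build();
//   byte[] wire = req.toByteArray();          // ready for the RPC layer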
private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); needBlockToken_ = false; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetLocatedFileInfoRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.needBlockToken_ = needBlockToken_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } if (other.hasNeedBlockToken()) { setNeedBlockToken(other.getNeedBlockToken()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int 
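// Editor's note (not part of the generated source): each bit of the bitField0_
// declared here records whether the corresponding field has been explicitly
// set on this builder (bit 0x1: src, bit 0x2: needBlockToken).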
bitField0_; // optional string src = 1; private java.lang.Object src_ = ""; /** * optional string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * optional string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * optional string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * optional string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * optional string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // optional bool needBlockToken = 2 [default = false]; private boolean needBlockToken_ ; /** * optional bool needBlockToken = 2 [default = false]; */ public boolean hasNeedBlockToken() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional bool needBlockToken = 2 [default = false]; */ public boolean getNeedBlockToken() { return needBlockToken_; } /** * optional bool needBlockToken = 2 [default = false]; */ public Builder setNeedBlockToken(boolean value) { bitField0_ |= 0x00000002; needBlockToken_ = value; onChanged(); return this; } /** * optional bool needBlockToken = 2 [default = false]; */ public Builder clearNeedBlockToken() { bitField0_ = (bitField0_ & ~0x00000002); needBlockToken_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetLocatedFileInfoRequestProto) } static { defaultInstance = new GetLocatedFileInfoRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetLocatedFileInfoRequestProto) } public interface GetLocatedFileInfoResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ boolean hasFs(); /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getFs(); /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getFsOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.GetLocatedFileInfoResponseProto} */ public static final class GetLocatedFileInfoResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetLocatedFileInfoResponseProtoOrBuilder { // Use GetLocatedFileInfoResponseProto.newBuilder() to construct. 
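// Editor's note (not part of the generated source): a minimal consumer-side
// sketch. The fs field is optional, so test hasFs() before getFs(); when the
// field is absent, getFs() returns HdfsFileStatusProto.getDefaultInstance()
// rather than null. The wireBytes array stands in for a serialized response.
//
//   GetLocatedFileInfoResponseProto resp =
//       GetLocatedFileInfoResponseProto.parseFrom(wireBytes);
//   if (resp.hasFs()) {
//     org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto fs =
//         resp.getFs();
//     // inspect file length, permissions, etc. via fs
//   }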
private GetLocatedFileInfoResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetLocatedFileInfoResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetLocatedFileInfoResponseProto defaultInstance; public static GetLocatedFileInfoResponseProto getDefaultInstance() { return defaultInstance; } public GetLocatedFileInfoResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetLocatedFileInfoResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = fs_.toBuilder(); } fs_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(fs_); fs_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetLocatedFileInfoResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetLocatedFileInfoResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetLocatedFileInfoResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetLocatedFileInfoResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; public static final int FS_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto fs_; /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public boolean hasFs() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getFs() { return fs_; } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getFsOrBuilder() { return fs_; } private void initFields() { fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (hasFs()) { if (!getFs().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, fs_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, fs_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto) obj; boolean result = true; result = result && (hasFs() == other.hasFs()); if (hasFs()) { result = result && getFs() .equals(other.getFs()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasFs()) { hash = (37 * hash) + FS_FIELD_NUMBER; hash = (53 * hash) + getFs().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto parseFrom( 
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return 
builder; } /** * Protobuf type {@code hadoop.hdfs.GetLocatedFileInfoResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetLocatedFileInfoResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetLocatedFileInfoResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getFsFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (fsBuilder_ == null) { fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); } else { fsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetLocatedFileInfoResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (fsBuilder_ == null) { result.fs_ = fs_; } else { result.fs_ = fsBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder 
mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto.getDefaultInstance()) return this; if (other.hasFs()) { mergeFs(other.getFs()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (hasFs()) { if (!getFs().isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> fsBuilder_; /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public boolean hasFs() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getFs() { if (fsBuilder_ == null) { return fs_; } else { return fsBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public Builder setFs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (fsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } fs_ = value; onChanged(); } else { fsBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public Builder setFs( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (fsBuilder_ == null) { fs_ = builderForValue.build(); onChanged(); } else { fsBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public Builder mergeFs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (fsBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && fs_ != 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()) { fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder(fs_).mergeFrom(value).buildPartial(); } else { fs_ = value; } onChanged(); } else { fsBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public Builder clearFs() { if (fsBuilder_ == null) { fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); onChanged(); } else { fsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getFsBuilder() { bitField0_ |= 0x00000001; onChanged(); return getFsFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getFsOrBuilder() { if (fsBuilder_ != null) { return fsBuilder_.getMessageOrBuilder(); } else { return fs_; } } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> getFsFieldBuilder() { if (fsBuilder_ == null) { fsBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>( fs_, getParentForChildren(), isClean()); fs_ = null; } return fsBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetLocatedFileInfoResponseProto) } static { defaultInstance = new GetLocatedFileInfoResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetLocatedFileInfoResponseProto) } public interface IsFileClosedRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; */ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); } /** * Protobuf type {@code hadoop.hdfs.IsFileClosedRequestProto} */ public static final class IsFileClosedRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements IsFileClosedRequestProtoOrBuilder { // Use IsFileClosedRequestProto.newBuilder() to construct. 
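  // A minimal usage sketch, not part of the generated source: building the
  // request with its one required field ("src") and round-tripping it through
  // the wire format. The path literal is hypothetical.
  //
  //   IsFileClosedRequestProto request = IsFileClosedRequestProto.newBuilder()
  //       .setSrc("/user/data/file.txt")            // required string src = 1
  //       .build();            // build() throws if a required field is unset
  //   byte[] wire = request.toByteArray();
  //   IsFileClosedRequestProto parsed = IsFileClosedRequestProto.parseFrom(wire);
  //   assert parsed.getSrc().equals(request.getSrc());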
private IsFileClosedRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private IsFileClosedRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final IsFileClosedRequestProto defaultInstance; public static IsFileClosedRequestProto getDefaultInstance() { return defaultInstance; } public IsFileClosedRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private IsFileClosedRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_IsFileClosedRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_IsFileClosedRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public IsFileClosedRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new IsFileClosedRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string src = 1; public static final int SRC_FIELD_NUMBER = 1; private 
java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { src_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return 
PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.IsFileClosedRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_IsFileClosedRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_IsFileClosedRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_IsFileClosedRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto other) { if (other == 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.IsFileClosedRequestProto) } static { defaultInstance = new IsFileClosedRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.IsFileClosedRequestProto) } public interface IsFileClosedResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required bool result = 1; /** * required bool result = 1; */ boolean hasResult(); /** * required bool result = 1; */ boolean getResult(); } /** * Protobuf type {@code hadoop.hdfs.IsFileClosedResponseProto} */ public static final class IsFileClosedResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements IsFileClosedResponseProtoOrBuilder { // Use IsFileClosedResponseProto.newBuilder() to construct. 
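  // A minimal usage sketch, not part of the generated source: the response
  // carries a single required bool, so a server-side reply and a client-side
  // check look roughly like this (values hypothetical).
  //
  //   IsFileClosedResponseProto response = IsFileClosedResponseProto.newBuilder()
  //       .setResult(true)                          // required bool result = 1
  //       .build();
  //   boolean closed = response.hasResult() && response.getResult();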
private IsFileClosedResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private IsFileClosedResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final IsFileClosedResponseProto defaultInstance; public static IsFileClosedResponseProto getDefaultInstance() { return defaultInstance; } public IsFileClosedResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private IsFileClosedResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; result_ = input.readBool(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_IsFileClosedResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_IsFileClosedResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public IsFileClosedResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new IsFileClosedResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required bool result = 1; public static final int RESULT_FIELD_NUMBER = 1; 
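  // Wire-format note: field number 1 with varint wire type 0 yields the tag
  // byte (1 << 3) | 0 = 8, which is why the parsing constructor above reads
  // this field under "case 8" via input.readBool().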
private boolean result_; /** * required bool result = 1; */ public boolean hasResult() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool result = 1; */ public boolean getResult() { return result_; } private void initFields() { result_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasResult()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBool(1, result_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(1, result_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto) obj; boolean result = true; result = result && (hasResult() == other.hasResult()); if (hasResult()) { result = result && (getResult() == other.getResult()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasResult()) { hash = (37 * hash) + RESULT_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getResult()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.IsFileClosedResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_IsFileClosedResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_IsFileClosedResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); result_ = false; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_IsFileClosedResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.result_ = result_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto.getDefaultInstance()) return this; if (other.hasResult()) { setResult(other.getResult()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasResult()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto parsedMessage = null; try { parsedMessage = 
PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required bool result = 1; private boolean result_ ; /** * required bool result = 1; */ public boolean hasResult() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool result = 1; */ public boolean getResult() { return result_; } /** * required bool result = 1; */ public Builder setResult(boolean value) { bitField0_ |= 0x00000001; result_ = value; onChanged(); return this; } /** * required bool result = 1; */ public Builder clearResult() { bitField0_ = (bitField0_ & ~0x00000001); result_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.IsFileClosedResponseProto) } static { defaultInstance = new IsFileClosedResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.IsFileClosedResponseProto) } public interface CacheDirectiveInfoProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // optional int64 id = 1; /** * optional int64 id = 1; */ boolean hasId(); /** * optional int64 id = 1; */ long getId(); // optional string path = 2; /** * optional string path = 2; */ boolean hasPath(); /** * optional string path = 2; */ java.lang.String getPath(); /** * optional string path = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes(); // optional uint32 replication = 3; /** * optional uint32 replication = 3; */ boolean hasReplication(); /** * optional uint32 replication = 3; */ int getReplication(); // optional string pool = 4; /** * optional string pool = 4; */ boolean hasPool(); /** * optional string pool = 4; */ java.lang.String getPool(); /** * optional string pool = 4; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPoolBytes(); // optional .hadoop.hdfs.CacheDirectiveInfoExpirationProto expiration = 5; /** * optional .hadoop.hdfs.CacheDirectiveInfoExpirationProto expiration = 5; */ boolean hasExpiration(); /** * optional .hadoop.hdfs.CacheDirectiveInfoExpirationProto expiration = 5; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto getExpiration(); /** * optional .hadoop.hdfs.CacheDirectiveInfoExpirationProto expiration = 5; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProtoOrBuilder getExpirationOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.CacheDirectiveInfoProto} */ public static final class CacheDirectiveInfoProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements CacheDirectiveInfoProtoOrBuilder { // Use CacheDirectiveInfoProto.newBuilder() to construct. 
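  // A minimal usage sketch, not part of the generated source: every field of
  // this message is optional, so build() always succeeds, and unset fields
  // report hasX() == false while returning their defaults. The path and pool
  // names are hypothetical.
  //
  //   CacheDirectiveInfoProto directive = CacheDirectiveInfoProto.newBuilder()
  //       .setPath("/datasets/hot")                 // optional string path = 2
  //       .setReplication(3)                  // optional uint32 replication = 3
  //       .setPool("analytics")                     // optional string pool = 4
  //       .build();
  //   assert !directive.hasId() && directive.getId() == 0L;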
private CacheDirectiveInfoProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private CacheDirectiveInfoProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final CacheDirectiveInfoProto defaultInstance; public static CacheDirectiveInfoProto getDefaultInstance() { return defaultInstance; } public CacheDirectiveInfoProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CacheDirectiveInfoProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; id_ = input.readInt64(); break; } case 18: { bitField0_ |= 0x00000002; path_ = input.readBytes(); break; } case 24: { bitField0_ |= 0x00000004; replication_ = input.readUInt32(); break; } case 34: { bitField0_ |= 0x00000008; pool_ = input.readBytes(); break; } case 42: { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto.Builder subBuilder = null; if (((bitField0_ & 0x00000010) == 0x00000010)) { subBuilder = expiration_.toBuilder(); } expiration_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(expiration_); expiration_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000010; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CacheDirectiveInfoProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CacheDirectiveInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder.class); } public static 
io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public CacheDirectiveInfoProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new CacheDirectiveInfoProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // optional int64 id = 1; public static final int ID_FIELD_NUMBER = 1; private long id_; /** * optional int64 id = 1; */ public boolean hasId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional int64 id = 1; */ public long getId() { return id_; } // optional string path = 2; public static final int PATH_FIELD_NUMBER = 2; private java.lang.Object path_; /** * optional string path = 2; */ public boolean hasPath() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string path = 2; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { path_ = s; } return s; } } /** * optional string path = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // optional uint32 replication = 3; public static final int REPLICATION_FIELD_NUMBER = 3; private int replication_; /** * optional uint32 replication = 3; */ public boolean hasReplication() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional uint32 replication = 3; */ public int getReplication() { return replication_; } // optional string pool = 4; public static final int POOL_FIELD_NUMBER = 4; private java.lang.Object pool_; /** * optional string pool = 4; */ public boolean hasPool() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional string pool = 4; */ public java.lang.String getPool() { java.lang.Object ref = pool_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { pool_ = s; } return s; } } /** * optional string pool = 4; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPoolBytes() { java.lang.Object ref = pool_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); pool_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // optional .hadoop.hdfs.CacheDirectiveInfoExpirationProto expiration = 5; public static final int 
EXPIRATION_FIELD_NUMBER = 5; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto expiration_; /** * optional .hadoop.hdfs.CacheDirectiveInfoExpirationProto expiration = 5; */ public boolean hasExpiration() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional .hadoop.hdfs.CacheDirectiveInfoExpirationProto expiration = 5; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto getExpiration() { return expiration_; } /** * optional .hadoop.hdfs.CacheDirectiveInfoExpirationProto expiration = 5; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProtoOrBuilder getExpirationOrBuilder() { return expiration_; } private void initFields() { id_ = 0L; path_ = ""; replication_ = 0; pool_ = ""; expiration_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (hasExpiration()) { if (!getExpiration().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeInt64(1, id_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getPathBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt32(3, replication_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeBytes(4, getPoolBytes()); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeMessage(5, expiration_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeInt64Size(1, id_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(2, getPathBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt32Size(3, replication_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(4, getPoolBytes()); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(5, expiration_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto other = 
(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto) obj; boolean result = true; result = result && (hasId() == other.hasId()); if (hasId()) { result = result && (getId() == other.getId()); } result = result && (hasPath() == other.hasPath()); if (hasPath()) { result = result && getPath() .equals(other.getPath()); } result = result && (hasReplication() == other.hasReplication()); if (hasReplication()) { result = result && (getReplication() == other.getReplication()); } result = result && (hasPool() == other.hasPool()); if (hasPool()) { result = result && getPool() .equals(other.getPool()); } result = result && (hasExpiration() == other.hasExpiration()); if (hasExpiration()) { result = result && getExpiration() .equals(other.getExpiration()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasId()) { hash = (37 * hash) + ID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getId()); } if (hasPath()) { hash = (37 * hash) + PATH_FIELD_NUMBER; hash = (53 * hash) + getPath().hashCode(); } if (hasReplication()) { hash = (37 * hash) + REPLICATION_FIELD_NUMBER; hash = (53 * hash) + getReplication(); } if (hasPool()) { hash = (37 * hash) + POOL_FIELD_NUMBER; hash = (53 * hash) + getPool().hashCode(); } if (hasExpiration()) { hash = (37 * hash) + EXPIRATION_FIELD_NUMBER; hash = (53 * hash) + getExpiration().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, 
extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.CacheDirectiveInfoProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CacheDirectiveInfoProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CacheDirectiveInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getExpirationFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); id_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); path_ = ""; bitField0_ = (bitField0_ & ~0x00000002); 
replication_ = 0; bitField0_ = (bitField0_ & ~0x00000004); pool_ = ""; bitField0_ = (bitField0_ & ~0x00000008); if (expirationBuilder_ == null) { expiration_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto.getDefaultInstance(); } else { expirationBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000010); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CacheDirectiveInfoProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.id_ = id_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.path_ = path_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.replication_ = replication_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.pool_ = pool_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } if (expirationBuilder_ == null) { result.expiration_ = expiration_; } else { result.expiration_ = expirationBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.getDefaultInstance()) return this; if (other.hasId()) { setId(other.getId()); } if (other.hasPath()) { bitField0_ |= 0x00000002; path_ = other.path_; onChanged(); } if (other.hasReplication()) { setReplication(other.getReplication()); } if (other.hasPool()) { bitField0_ |= 0x00000008; pool_ = other.pool_; onChanged(); } if (other.hasExpiration()) { mergeExpiration(other.getExpiration()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (hasExpiration()) { if (!getExpiration().isInitialized()) { return false; } } return true; } 
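      // A minimal usage sketch (illustrative values only; the path and pool names are
      // hypothetical): a directive is assembled through this Builder and round-tripped
      // with the static parse methods on the enclosing message. Every call below is
      // part of this generated class or its protobuf base classes.
      //
      //   CacheDirectiveInfoProto directive = CacheDirectiveInfoProto.newBuilder()
      //       .setPath("/warm/dataset")    // hypothetical HDFS path
      //       .setReplication(2)
      //       .setPool("analytics")        // hypothetical cache pool name
      //       .build();
      //   byte[] wire = directive.toByteArray();
      //   CacheDirectiveInfoProto same = CacheDirectiveInfoProto.parseFrom(wire);
      //
      // All five fields are optional, so build() succeeds with any subset set; per
      // isInitialized() above, only a present-but-uninitialized expiration fails it.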
public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // optional int64 id = 1; private long id_ ; /** * optional int64 id = 1; */ public boolean hasId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional int64 id = 1; */ public long getId() { return id_; } /** * optional int64 id = 1; */ public Builder setId(long value) { bitField0_ |= 0x00000001; id_ = value; onChanged(); return this; } /** * optional int64 id = 1; */ public Builder clearId() { bitField0_ = (bitField0_ & ~0x00000001); id_ = 0L; onChanged(); return this; } // optional string path = 2; private java.lang.Object path_ = ""; /** * optional string path = 2; */ public boolean hasPath() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string path = 2; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); path_ = s; return s; } else { return (java.lang.String) ref; } } /** * optional string path = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * optional string path = 2; */ public Builder setPath( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; path_ = value; onChanged(); return this; } /** * optional string path = 2; */ public Builder clearPath() { bitField0_ = (bitField0_ & ~0x00000002); path_ = getDefaultInstance().getPath(); onChanged(); return this; } /** * optional string path = 2; */ public Builder setPathBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; path_ = value; onChanged(); return this; } // optional uint32 replication = 3; private int replication_ ; /** * optional uint32 replication = 3; */ public boolean hasReplication() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional uint32 replication = 3; */ public int getReplication() { return replication_; } /** * optional uint32 replication = 3; */ public Builder setReplication(int value) { bitField0_ |= 0x00000004; replication_ = value; onChanged(); return this; } /** * optional uint32 replication = 3; */ public Builder clearReplication() { bitField0_ = (bitField0_ & ~0x00000004); replication_ = 0; onChanged(); return this; } // optional string pool = 4; private java.lang.Object pool_ = ""; /** * optional string pool = 4; */ 
public boolean hasPool() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional string pool = 4; */ public java.lang.String getPool() { java.lang.Object ref = pool_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); pool_ = s; return s; } else { return (java.lang.String) ref; } } /** * optional string pool = 4; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPoolBytes() { java.lang.Object ref = pool_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); pool_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * optional string pool = 4; */ public Builder setPool( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; pool_ = value; onChanged(); return this; } /** * optional string pool = 4; */ public Builder clearPool() { bitField0_ = (bitField0_ & ~0x00000008); pool_ = getDefaultInstance().getPool(); onChanged(); return this; } /** * optional string pool = 4; */ public Builder setPoolBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; pool_ = value; onChanged(); return this; } // optional .hadoop.hdfs.CacheDirectiveInfoExpirationProto expiration = 5; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto expiration_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProtoOrBuilder> expirationBuilder_; /** * optional .hadoop.hdfs.CacheDirectiveInfoExpirationProto expiration = 5; */ public boolean hasExpiration() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional .hadoop.hdfs.CacheDirectiveInfoExpirationProto expiration = 5; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto getExpiration() { if (expirationBuilder_ == null) { return expiration_; } else { return expirationBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.CacheDirectiveInfoExpirationProto expiration = 5; */ public Builder setExpiration(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto value) { if (expirationBuilder_ == null) { if (value == null) { throw new NullPointerException(); } expiration_ = value; onChanged(); } else { expirationBuilder_.setMessage(value); } bitField0_ |= 0x00000010; return this; } /** * optional .hadoop.hdfs.CacheDirectiveInfoExpirationProto expiration = 5; */ public Builder setExpiration( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto.Builder builderForValue) { if (expirationBuilder_ == null) { expiration_ = builderForValue.build(); onChanged(); } else { expirationBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 
0x00000010; return this; } /** * optional .hadoop.hdfs.CacheDirectiveInfoExpirationProto expiration = 5; */ public Builder mergeExpiration(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto value) { if (expirationBuilder_ == null) { if (((bitField0_ & 0x00000010) == 0x00000010) && expiration_ != org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto.getDefaultInstance()) { expiration_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto.newBuilder(expiration_).mergeFrom(value).buildPartial(); } else { expiration_ = value; } onChanged(); } else { expirationBuilder_.mergeFrom(value); } bitField0_ |= 0x00000010; return this; } /** * optional .hadoop.hdfs.CacheDirectiveInfoExpirationProto expiration = 5; */ public Builder clearExpiration() { if (expirationBuilder_ == null) { expiration_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto.getDefaultInstance(); onChanged(); } else { expirationBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000010); return this; } /** * optional .hadoop.hdfs.CacheDirectiveInfoExpirationProto expiration = 5; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto.Builder getExpirationBuilder() { bitField0_ |= 0x00000010; onChanged(); return getExpirationFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.CacheDirectiveInfoExpirationProto expiration = 5; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProtoOrBuilder getExpirationOrBuilder() { if (expirationBuilder_ != null) { return expirationBuilder_.getMessageOrBuilder(); } else { return expiration_; } } /** * optional .hadoop.hdfs.CacheDirectiveInfoExpirationProto expiration = 5; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProtoOrBuilder> getExpirationFieldBuilder() { if (expirationBuilder_ == null) { expirationBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProtoOrBuilder>( expiration_, getParentForChildren(), isClean()); expiration_ = null; } return expirationBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CacheDirectiveInfoProto) } static { defaultInstance = new CacheDirectiveInfoProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.CacheDirectiveInfoProto) } public interface CacheDirectiveInfoExpirationProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required int64 millis = 1; /** * required int64 millis = 1; */ boolean hasMillis(); /** * required int64 millis = 1; */ long getMillis(); // required bool isRelative = 2; /** * required bool isRelative = 2; */ boolean hasIsRelative(); /** * required bool isRelative = 
2; */ boolean getIsRelative(); } /** * Protobuf type {@code hadoop.hdfs.CacheDirectiveInfoExpirationProto} */ public static final class CacheDirectiveInfoExpirationProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements CacheDirectiveInfoExpirationProtoOrBuilder { // Use CacheDirectiveInfoExpirationProto.newBuilder() to construct. private CacheDirectiveInfoExpirationProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private CacheDirectiveInfoExpirationProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final CacheDirectiveInfoExpirationProto defaultInstance; public static CacheDirectiveInfoExpirationProto getDefaultInstance() { return defaultInstance; } public CacheDirectiveInfoExpirationProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CacheDirectiveInfoExpirationProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; millis_ = input.readInt64(); break; } case 16: { bitField0_ |= 0x00000002; isRelative_ = input.readBool(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CacheDirectiveInfoExpirationProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CacheDirectiveInfoExpirationProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public CacheDirectiveInfoExpirationProto parsePartialFrom( 
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new CacheDirectiveInfoExpirationProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required int64 millis = 1; public static final int MILLIS_FIELD_NUMBER = 1; private long millis_; /** * required int64 millis = 1; */ public boolean hasMillis() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 millis = 1; */ public long getMillis() { return millis_; } // required bool isRelative = 2; public static final int ISRELATIVE_FIELD_NUMBER = 2; private boolean isRelative_; /** * required bool isRelative = 2; */ public boolean hasIsRelative() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required bool isRelative = 2; */ public boolean getIsRelative() { return isRelative_; } private void initFields() { millis_ = 0L; isRelative_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasMillis()) { memoizedIsInitialized = 0; return false; } if (!hasIsRelative()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeInt64(1, millis_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBool(2, isRelative_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeInt64Size(1, millis_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(2, isRelative_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto) obj; boolean result = true; result = result && (hasMillis() == other.hasMillis()); if (hasMillis()) { result = result && (getMillis() == other.getMillis()); } result = result && (hasIsRelative() == other.hasIsRelative()); if (hasIsRelative()) { result = result && (getIsRelative() == other.getIsRelative()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int 
hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasMillis()) { hash = (37 * hash) + MILLIS_FIELD_NUMBER; hash = (53 * hash) + hashLong(getMillis()); } if (hasIsRelative()) { hash = (37 * hash) + ISRELATIVE_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getIsRelative()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return 
PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.CacheDirectiveInfoExpirationProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CacheDirectiveInfoExpirationProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CacheDirectiveInfoExpirationProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); millis_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); isRelative_ = false; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CacheDirectiveInfoExpirationProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto buildPartial() { 
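        // buildPartial() copies the builder state into a new message without the
        // isInitialized() check that build() performs. bitField0_ is a has-bits mask
        // (bit 0 = millis, bit 1 = isRelative); field values are copied unconditionally,
        // and only the presence bits of explicitly-set fields are transferred.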
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.millis_ = millis_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.isRelative_ = isRelative_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto.getDefaultInstance()) return this; if (other.hasMillis()) { setMillis(other.getMillis()); } if (other.hasIsRelative()) { setIsRelative(other.getIsRelative()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasMillis()) { return false; } if (!hasIsRelative()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required int64 millis = 1; private long millis_ ; /** * required int64 millis = 1; */ public boolean hasMillis() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 millis = 1; */ public long getMillis() { return millis_; } /** * required int64 millis = 1; */ public Builder setMillis(long value) { bitField0_ |= 0x00000001; millis_ = value; onChanged(); return this; } /** * required int64 millis = 1; */ public Builder clearMillis() { bitField0_ = (bitField0_ & ~0x00000001); millis_ = 0L; onChanged(); return this; } // required bool isRelative = 2; private boolean isRelative_ ; /** * required bool isRelative = 2; */ public boolean hasIsRelative() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required bool isRelative = 2; */ public boolean getIsRelative() { return isRelative_; } /** * required bool isRelative = 2; */ public Builder setIsRelative(boolean value) { bitField0_ |= 0x00000002; isRelative_ = value; onChanged(); return this; } /** * required bool isRelative = 2; */ public Builder clearIsRelative() { bitField0_ = (bitField0_ & ~0x00000002); isRelative_ = false; onChanged(); return this; } // 
@@protoc_insertion_point(builder_scope:hadoop.hdfs.CacheDirectiveInfoExpirationProto) } static { defaultInstance = new CacheDirectiveInfoExpirationProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.CacheDirectiveInfoExpirationProto) } public interface CacheDirectiveStatsProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required int64 bytesNeeded = 1; /** * required int64 bytesNeeded = 1; */ boolean hasBytesNeeded(); /** * required int64 bytesNeeded = 1; */ long getBytesNeeded(); // required int64 bytesCached = 2; /** * required int64 bytesCached = 2; */ boolean hasBytesCached(); /** * required int64 bytesCached = 2; */ long getBytesCached(); // required int64 filesNeeded = 3; /** * required int64 filesNeeded = 3; */ boolean hasFilesNeeded(); /** * required int64 filesNeeded = 3; */ long getFilesNeeded(); // required int64 filesCached = 4; /** * required int64 filesCached = 4; */ boolean hasFilesCached(); /** * required int64 filesCached = 4; */ long getFilesCached(); // required bool hasExpired = 5; /** * required bool hasExpired = 5; */ boolean hasHasExpired(); /** * required bool hasExpired = 5; */ boolean getHasExpired(); } /** * Protobuf type {@code hadoop.hdfs.CacheDirectiveStatsProto} */ public static final class CacheDirectiveStatsProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements CacheDirectiveStatsProtoOrBuilder { // Use CacheDirectiveStatsProto.newBuilder() to construct. private CacheDirectiveStatsProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private CacheDirectiveStatsProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final CacheDirectiveStatsProto defaultInstance; public static CacheDirectiveStatsProto getDefaultInstance() { return defaultInstance; } public CacheDirectiveStatsProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CacheDirectiveStatsProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; bytesNeeded_ = input.readInt64(); break; } case 16: { bitField0_ |= 0x00000002; bytesCached_ = input.readInt64(); break; } case 24: { bitField0_ |= 0x00000004; filesNeeded_ = input.readInt64(); break; } case 32: { bitField0_ |= 0x00000008; filesCached_ = input.readInt64(); break; } case 40: { bitField0_ |= 0x00000010; hasExpired_ = input.readBool(); break; } } } } catch 
(io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CacheDirectiveStatsProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CacheDirectiveStatsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public CacheDirectiveStatsProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new CacheDirectiveStatsProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required int64 bytesNeeded = 1; public static final int BYTESNEEDED_FIELD_NUMBER = 1; private long bytesNeeded_; /** * required int64 bytesNeeded = 1; */ public boolean hasBytesNeeded() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 bytesNeeded = 1; */ public long getBytesNeeded() { return bytesNeeded_; } // required int64 bytesCached = 2; public static final int BYTESCACHED_FIELD_NUMBER = 2; private long bytesCached_; /** * required int64 bytesCached = 2; */ public boolean hasBytesCached() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required int64 bytesCached = 2; */ public long getBytesCached() { return bytesCached_; } // required int64 filesNeeded = 3; public static final int FILESNEEDED_FIELD_NUMBER = 3; private long filesNeeded_; /** * required int64 filesNeeded = 3; */ public boolean hasFilesNeeded() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required int64 filesNeeded = 3; */ public long getFilesNeeded() { return filesNeeded_; } // required int64 filesCached = 4; public static final int FILESCACHED_FIELD_NUMBER = 4; private long filesCached_; /** * required int64 filesCached = 4; */ public boolean hasFilesCached() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required int64 filesCached = 4; */ public long getFilesCached() { return filesCached_; } // required bool hasExpired = 5; public static final int HASEXPIRED_FIELD_NUMBER = 5; private boolean hasExpired_; /** * required bool hasExpired = 5; */ public boolean hasHasExpired() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required bool hasExpired = 5; */ public boolean getHasExpired() { return hasExpired_; } private void initFields() { bytesNeeded_ = 0L; 
bytesCached_ = 0L; filesNeeded_ = 0L; filesCached_ = 0L; hasExpired_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasBytesNeeded()) { memoizedIsInitialized = 0; return false; } if (!hasBytesCached()) { memoizedIsInitialized = 0; return false; } if (!hasFilesNeeded()) { memoizedIsInitialized = 0; return false; } if (!hasFilesCached()) { memoizedIsInitialized = 0; return false; } if (!hasHasExpired()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeInt64(1, bytesNeeded_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeInt64(2, bytesCached_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeInt64(3, filesNeeded_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeInt64(4, filesCached_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeBool(5, hasExpired_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeInt64Size(1, bytesNeeded_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeInt64Size(2, bytesCached_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeInt64Size(3, filesNeeded_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeInt64Size(4, filesCached_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(5, hasExpired_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto) obj; boolean result = true; result = result && (hasBytesNeeded() == other.hasBytesNeeded()); if (hasBytesNeeded()) { result = result && (getBytesNeeded() == other.getBytesNeeded()); } result = result && (hasBytesCached() == other.hasBytesCached()); if (hasBytesCached()) { result = result && (getBytesCached() == other.getBytesCached()); } result = result && (hasFilesNeeded() == other.hasFilesNeeded()); if (hasFilesNeeded()) { result = result && (getFilesNeeded() == other.getFilesNeeded()); } result = result && (hasFilesCached() == other.hasFilesCached()); if (hasFilesCached()) { result = result && 
(getFilesCached() == other.getFilesCached()); } result = result && (hasHasExpired() == other.hasHasExpired()); if (hasHasExpired()) { result = result && (getHasExpired() == other.getHasExpired()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasBytesNeeded()) { hash = (37 * hash) + BYTESNEEDED_FIELD_NUMBER; hash = (53 * hash) + hashLong(getBytesNeeded()); } if (hasBytesCached()) { hash = (37 * hash) + BYTESCACHED_FIELD_NUMBER; hash = (53 * hash) + hashLong(getBytesCached()); } if (hasFilesNeeded()) { hash = (37 * hash) + FILESNEEDED_FIELD_NUMBER; hash = (53 * hash) + hashLong(getFilesNeeded()); } if (hasFilesCached()) { hash = (37 * hash) + FILESCACHED_FIELD_NUMBER; hash = (53 * hash) + hashLong(getFilesCached()); } if (hasHasExpired()) { hash = (37 * hash) + HASEXPIRED_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getHasExpired()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { 
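      // The delimited format prefixes the message with its varint-encoded byte length
      // (the counterpart of writeDelimitedTo(OutputStream)), which lets several
      // messages be framed back-to-back on a single stream.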
return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.CacheDirectiveStatsProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CacheDirectiveStatsProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CacheDirectiveStatsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); bytesNeeded_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); bytesCached_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); filesNeeded_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); filesCached_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); hasExpired_ = false; bitField0_ = (bitField0_ & ~0x00000010); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CacheDirectiveStatsProto_descriptor; } public 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.bytesNeeded_ = bytesNeeded_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.bytesCached_ = bytesCached_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.filesNeeded_ = filesNeeded_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.filesCached_ = filesCached_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } result.hasExpired_ = hasExpired_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto.getDefaultInstance()) return this; if (other.hasBytesNeeded()) { setBytesNeeded(other.getBytesNeeded()); } if (other.hasBytesCached()) { setBytesCached(other.getBytesCached()); } if (other.hasFilesNeeded()) { setFilesNeeded(other.getFilesNeeded()); } if (other.hasFilesCached()) { setFilesCached(other.getFilesCached()); } if (other.hasHasExpired()) { setHasExpired(other.getHasExpired()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasBytesNeeded()) { return false; } if (!hasBytesCached()) { return false; } if (!hasFilesNeeded()) { return false; } if (!hasFilesCached()) { return false; } if (!hasHasExpired()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto) 
e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required int64 bytesNeeded = 1; private long bytesNeeded_ ; /** * required int64 bytesNeeded = 1; */ public boolean hasBytesNeeded() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 bytesNeeded = 1; */ public long getBytesNeeded() { return bytesNeeded_; } /** * required int64 bytesNeeded = 1; */ public Builder setBytesNeeded(long value) { bitField0_ |= 0x00000001; bytesNeeded_ = value; onChanged(); return this; } /** * required int64 bytesNeeded = 1; */ public Builder clearBytesNeeded() { bitField0_ = (bitField0_ & ~0x00000001); bytesNeeded_ = 0L; onChanged(); return this; } // required int64 bytesCached = 2; private long bytesCached_ ; /** * required int64 bytesCached = 2; */ public boolean hasBytesCached() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required int64 bytesCached = 2; */ public long getBytesCached() { return bytesCached_; } /** * required int64 bytesCached = 2; */ public Builder setBytesCached(long value) { bitField0_ |= 0x00000002; bytesCached_ = value; onChanged(); return this; } /** * required int64 bytesCached = 2; */ public Builder clearBytesCached() { bitField0_ = (bitField0_ & ~0x00000002); bytesCached_ = 0L; onChanged(); return this; } // required int64 filesNeeded = 3; private long filesNeeded_ ; /** * required int64 filesNeeded = 3; */ public boolean hasFilesNeeded() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required int64 filesNeeded = 3; */ public long getFilesNeeded() { return filesNeeded_; } /** * required int64 filesNeeded = 3; */ public Builder setFilesNeeded(long value) { bitField0_ |= 0x00000004; filesNeeded_ = value; onChanged(); return this; } /** * required int64 filesNeeded = 3; */ public Builder clearFilesNeeded() { bitField0_ = (bitField0_ & ~0x00000004); filesNeeded_ = 0L; onChanged(); return this; } // required int64 filesCached = 4; private long filesCached_ ; /** * required int64 filesCached = 4; */ public boolean hasFilesCached() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required int64 filesCached = 4; */ public long getFilesCached() { return filesCached_; } /** * required int64 filesCached = 4; */ public Builder setFilesCached(long value) { bitField0_ |= 0x00000008; filesCached_ = value; onChanged(); return this; } /** * required int64 filesCached = 4; */ public Builder clearFilesCached() { bitField0_ = (bitField0_ & ~0x00000008); filesCached_ = 0L; onChanged(); return this; } // required bool hasExpired = 5; private boolean hasExpired_ ; /** * required bool hasExpired = 5; */ public boolean hasHasExpired() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required bool hasExpired = 5; */ public boolean getHasExpired() { return hasExpired_; } /** * required bool hasExpired = 5; */ public Builder setHasExpired(boolean value) { bitField0_ |= 0x00000010; hasExpired_ = value; onChanged(); return this; } /** * required bool hasExpired = 5; */ public Builder clearHasExpired() { bitField0_ = (bitField0_ & ~0x00000010); hasExpired_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CacheDirectiveStatsProto) } static { defaultInstance = new CacheDirectiveStatsProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.CacheDirectiveStatsProto) } public interface AddCacheDirectiveRequestProtoOrBuilder extends 
io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ boolean hasInfo(); /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto getInfo(); /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder getInfoOrBuilder(); // optional uint32 cacheFlags = 2; /** * optional uint32 cacheFlags = 2; * *
     * bits set using CacheFlag
     *
*/ boolean hasCacheFlags(); /** * optional uint32 cacheFlags = 2; * *
     * bits set using CacheFlag
     *
*/ int getCacheFlags(); } /** * Protobuf type {@code hadoop.hdfs.AddCacheDirectiveRequestProto} */ public static final class AddCacheDirectiveRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements AddCacheDirectiveRequestProtoOrBuilder { // Use AddCacheDirectiveRequestProto.newBuilder() to construct. private AddCacheDirectiveRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private AddCacheDirectiveRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final AddCacheDirectiveRequestProto defaultInstance; public static AddCacheDirectiveRequestProto getDefaultInstance() { return defaultInstance; } public AddCacheDirectiveRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private AddCacheDirectiveRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = info_.toBuilder(); } info_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(info_); info_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; cacheFlags_ = input.readUInt32(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddCacheDirectiveRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddCacheDirectiveRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( 
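// The hand-rolled parse loop above dispatches on raw protobuf tags, where
// tag = (field_number << 3) | wire_type. For the two fields of this message:
//
//   info       (field 1, length-delimited, wire type 2): (1 << 3) | 2 == 10
//   cacheFlags (field 2, varint,           wire type 0): (2 << 3) | 0 == 16
//
// which is exactly why the switch handles case 10 and case 16.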
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public AddCacheDirectiveRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new AddCacheDirectiveRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; public static final int INFO_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto info_; /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ public boolean hasInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto getInfo() { return info_; } /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder getInfoOrBuilder() { return info_; } // optional uint32 cacheFlags = 2; public static final int CACHEFLAGS_FIELD_NUMBER = 2; private int cacheFlags_; /** * optional uint32 cacheFlags = 2; * *
     * bits set using CacheFlag
     *
*/ public boolean hasCacheFlags() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional uint32 cacheFlags = 2; * *
     * bits set using CacheFlag
     *
*/ public int getCacheFlags() { return cacheFlags_; } private void initFields() { info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.getDefaultInstance(); cacheFlags_ = 0; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasInfo()) { memoizedIsInitialized = 0; return false; } if (!getInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, info_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt32(2, cacheFlags_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, info_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt32Size(2, cacheFlags_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto) obj; boolean result = true; result = result && (hasInfo() == other.hasInfo()); if (hasInfo()) { result = result && getInfo() .equals(other.getInfo()); } result = result && (hasCacheFlags() == other.hasCacheFlags()); if (hasCacheFlags()) { result = result && (getCacheFlags() == other.getCacheFlags()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasInfo()) { hash = (37 * hash) + INFO_FIELD_NUMBER; hash = (53 * hash) + getInfo().hashCode(); } if (hasCacheFlags()) { hash = (37 * hash) + CACHEFLAGS_FIELD_NUMBER; hash = (53 * hash) + getCacheFlags(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, 
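// Round-trip sketch (illustrative, not generated code): the parseFrom
// overloads around this point accept byte arrays, ByteStrings, and streams.
// Assuming a previously built request `req`:
//
//   byte[] bytes = req.toByteArray();  // serializer inherited from the protobuf runtime
//   AddCacheDirectiveRequestProto copy = AddCacheDirectiveRequestProto.parseFrom(bytes);
//   assert copy.equals(req);           // field-wise equals() defined above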
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.AddCacheDirectiveRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProtoOrBuilder { public static final 
io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddCacheDirectiveRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddCacheDirectiveRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getInfoFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (infoBuilder_ == null) { info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.getDefaultInstance(); } else { infoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); cacheFlags_ = 0; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddCacheDirectiveRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (infoBuilder_ == null) { result.info_ = info_; } else { result.info_ = infoBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.cacheFlags_ = cacheFlags_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto.getDefaultInstance()) return this; if (other.hasInfo()) { mergeInfo(other.getInfo()); } if (other.hasCacheFlags()) { setCacheFlags(other.getCacheFlags()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasInfo()) { return false; } if (!getInfo().isInitialized()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder> infoBuilder_; /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ public boolean hasInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto getInfo() { if (infoBuilder_ == null) { return info_; } else { return infoBuilder_.getMessage(); } } /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ public Builder setInfo(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto value) { if (infoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } info_ = value; onChanged(); } else { infoBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ public Builder setInfo( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder builderForValue) { if (infoBuilder_ == null) { info_ = builderForValue.build(); onChanged(); } else { infoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ public 
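// Note on the info accessors here: setInfo() replaces the sub-message
// outright, while mergeInfo() (next) folds the set fields of its argument
// into any value already present. A sketch, where `partialInfo` is a
// hypothetical CacheDirectiveInfoProto carrying only some fields:
//
//   AddCacheDirectiveRequestProto.Builder b = AddCacheDirectiveRequestProto.newBuilder()
//       .setInfo(CacheDirectiveInfoProto.getDefaultInstance());
//   b.mergeInfo(partialInfo);  // protobuf merge, not replacement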
Builder mergeInfo(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto value) { if (infoBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && info_ != org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.getDefaultInstance()) { info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.newBuilder(info_).mergeFrom(value).buildPartial(); } else { info_ = value; } onChanged(); } else { infoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ public Builder clearInfo() { if (infoBuilder_ == null) { info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.getDefaultInstance(); onChanged(); } else { infoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder getInfoBuilder() { bitField0_ |= 0x00000001; onChanged(); return getInfoFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder getInfoOrBuilder() { if (infoBuilder_ != null) { return infoBuilder_.getMessageOrBuilder(); } else { return info_; } } /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder> getInfoFieldBuilder() { if (infoBuilder_ == null) { infoBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder>( info_, getParentForChildren(), isClean()); info_ = null; } return infoBuilder_; } // optional uint32 cacheFlags = 2; private int cacheFlags_ ; /** * optional uint32 cacheFlags = 2; * *
       * bits set using CacheFlag
       *
*/ public boolean hasCacheFlags() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional uint32 cacheFlags = 2; * *
       * bits set using CacheFlag
       *
*/ public int getCacheFlags() { return cacheFlags_; } /** * optional uint32 cacheFlags = 2; * *
       * bits set using CacheFlag
       *
*/ public Builder setCacheFlags(int value) { bitField0_ |= 0x00000002; cacheFlags_ = value; onChanged(); return this; } /** * optional uint32 cacheFlags = 2; * *
       * bits set using CacheFlag
       *
*/ public Builder clearCacheFlags() { bitField0_ = (bitField0_ & ~0x00000002); cacheFlags_ = 0; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AddCacheDirectiveRequestProto) } static { defaultInstance = new AddCacheDirectiveRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.AddCacheDirectiveRequestProto) } public interface AddCacheDirectiveResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required int64 id = 1; /** * required int64 id = 1; */ boolean hasId(); /** * required int64 id = 1; */ long getId(); } /** * Protobuf type {@code hadoop.hdfs.AddCacheDirectiveResponseProto} */ public static final class AddCacheDirectiveResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements AddCacheDirectiveResponseProtoOrBuilder { // Use AddCacheDirectiveResponseProto.newBuilder() to construct. private AddCacheDirectiveResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private AddCacheDirectiveResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final AddCacheDirectiveResponseProto defaultInstance; public static AddCacheDirectiveResponseProto getDefaultInstance() { return defaultInstance; } public AddCacheDirectiveResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private AddCacheDirectiveResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; id_ = input.readInt64(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddCacheDirectiveResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return 
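// Putting the request and response together (illustrative sketch; `stub`
// stands in for whatever transport carries the RPC, and `info` and `flags`
// for values built elsewhere; none of the three is defined at this point):
//
//   AddCacheDirectiveRequestProto req = AddCacheDirectiveRequestProto.newBuilder()
//       .setInfo(info)           // required: build() throws if info is unset
//       .setCacheFlags(flags)    // optional CacheFlag bitmask
//       .build();
//   AddCacheDirectiveResponseProto resp = stub.addCacheDirective(req);
//   long directiveId = resp.getId();  // required int64 id = 1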
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddCacheDirectiveResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public AddCacheDirectiveResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new AddCacheDirectiveResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required int64 id = 1; public static final int ID_FIELD_NUMBER = 1; private long id_; /** * required int64 id = 1; */ public boolean hasId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 id = 1; */ public long getId() { return id_; } private void initFields() { id_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasId()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeInt64(1, id_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeInt64Size(1, id_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto) obj; boolean result = true; result = result && (hasId() == other.hasId()); if (hasId()) { result = result && (getId() == other.getId()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasId()) { hash = (37 * hash) + ID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getId()); } hash = (29 * hash) + getUnknownFields().hashCode(); 
memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder 
newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.AddCacheDirectiveResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddCacheDirectiveResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddCacheDirectiveResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); id_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddCacheDirectiveResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.id_ = id_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof 
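// Constructing a response by hand, e.g. in a test, is a one-liner (sketch):
//
//   AddCacheDirectiveResponseProto resp =
//       AddCacheDirectiveResponseProto.newBuilder().setId(42L).build();
//
// build() enforces the required id field through isInitialized(), so
// forgetting setId() fails fast instead of yielding a half-built message.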
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto.getDefaultInstance()) return this; if (other.hasId()) { setId(other.getId()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasId()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required int64 id = 1; private long id_ ; /** * required int64 id = 1; */ public boolean hasId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 id = 1; */ public long getId() { return id_; } /** * required int64 id = 1; */ public Builder setId(long value) { bitField0_ |= 0x00000001; id_ = value; onChanged(); return this; } /** * required int64 id = 1; */ public Builder clearId() { bitField0_ = (bitField0_ & ~0x00000001); id_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AddCacheDirectiveResponseProto) } static { defaultInstance = new AddCacheDirectiveResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.AddCacheDirectiveResponseProto) } public interface ModifyCacheDirectiveRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ boolean hasInfo(); /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto getInfo(); /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder getInfoOrBuilder(); // optional uint32 cacheFlags = 2; /** * optional uint32 cacheFlags = 2; * *
     * bits set using CacheFlag
     *
*/ boolean hasCacheFlags(); /** * optional uint32 cacheFlags = 2; * *
     * bits set using CacheFlag
     *
*/ int getCacheFlags(); } /** * Protobuf type {@code hadoop.hdfs.ModifyCacheDirectiveRequestProto} */ public static final class ModifyCacheDirectiveRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements ModifyCacheDirectiveRequestProtoOrBuilder { // Use ModifyCacheDirectiveRequestProto.newBuilder() to construct. private ModifyCacheDirectiveRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ModifyCacheDirectiveRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ModifyCacheDirectiveRequestProto defaultInstance; public static ModifyCacheDirectiveRequestProto getDefaultInstance() { return defaultInstance; } public ModifyCacheDirectiveRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ModifyCacheDirectiveRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = info_.toBuilder(); } info_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(info_); info_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 16: { bitField0_ |= 0x00000002; cacheFlags_ = input.readUInt32(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ModifyCacheDirectiveRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ModifyCacheDirectiveRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( 
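// ModifyCacheDirectiveRequestProto mirrors the add request: a required
// CacheDirectiveInfoProto (which, in HDFS usage, is expected to carry the id
// of the directive being modified) plus the optional cacheFlags bitmask.
// The required field is enforced at build time (sketch):
//
//   ModifyCacheDirectiveRequestProto.newBuilder().build();
//   // throws UninitializedMessageException: info is required but unset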
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public ModifyCacheDirectiveRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new ModifyCacheDirectiveRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; public static final int INFO_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto info_; /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ public boolean hasInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto getInfo() { return info_; } /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder getInfoOrBuilder() { return info_; } // optional uint32 cacheFlags = 2; public static final int CACHEFLAGS_FIELD_NUMBER = 2; private int cacheFlags_; /** * optional uint32 cacheFlags = 2; * *
     * bits set using CacheFlag
     *
*/ public boolean hasCacheFlags() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional uint32 cacheFlags = 2; * *
     * bits set using CacheFlag
     *
*/ public int getCacheFlags() { return cacheFlags_; } private void initFields() { info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.getDefaultInstance(); cacheFlags_ = 0; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasInfo()) { memoizedIsInitialized = 0; return false; } if (!getInfo().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, info_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt32(2, cacheFlags_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, info_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt32Size(2, cacheFlags_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto) obj; boolean result = true; result = result && (hasInfo() == other.hasInfo()); if (hasInfo()) { result = result && getInfo() .equals(other.getInfo()); } result = result && (hasCacheFlags() == other.hasCacheFlags()); if (hasCacheFlags()) { result = result && (getCacheFlags() == other.getCacheFlags()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasInfo()) { hash = (37 * hash) + INFO_FIELD_NUMBER; hash = (53 * hash) + getInfo().hashCode(); } if (hasCacheFlags()) { hash = (37 * hash) + CACHEFLAGS_FIELD_NUMBER; hash = (53 * hash) + getCacheFlags(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, 
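// The registry-taking parseFrom overloads only matter for messages that use
// protobuf extensions; this message defines none, so an empty registry parses
// identically (sketch):
//
//   ModifyCacheDirectiveRequestProto req = ModifyCacheDirectiveRequestProto.parseFrom(
//       bytes,
//       io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite.getEmptyRegistry());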
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ModifyCacheDirectiveRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProtoOrBuilder { public static final 
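// Background on the "bits set using CacheFlag" javadoc repeated in this file:
// the bitmask mirrors the HDFS client-side CacheFlag enum, whose only bit in
// stock Hadoop is FORCE (0x01, ignore cache pool resource limits). That
// detail comes from the Hadoop sources, not from this generated file; a
// caller wanting it would write setCacheFlags(0x01).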
io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ModifyCacheDirectiveRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ModifyCacheDirectiveRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getInfoFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (infoBuilder_ == null) { info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.getDefaultInstance(); } else { infoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); cacheFlags_ = 0; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ModifyCacheDirectiveRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (infoBuilder_ == null) { result.info_ = info_; } else { result.info_ = infoBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.cacheFlags_ = cacheFlags_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof 
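// toBuilder() gives a mutable copy, so an existing request can be tweaked
// without touching the original (sketch, `original` assumed built earlier):
//
//   ModifyCacheDirectiveRequestProto changed =
//       original.toBuilder().setCacheFlags(0).build();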
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto.getDefaultInstance()) return this; if (other.hasInfo()) { mergeInfo(other.getInfo()); } if (other.hasCacheFlags()) { setCacheFlags(other.getCacheFlags()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasInfo()) { return false; } if (!getInfo().isInitialized()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder> infoBuilder_; /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ public boolean hasInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto getInfo() { if (infoBuilder_ == null) { return info_; } else { return infoBuilder_.getMessage(); } } /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ public Builder setInfo(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto value) { if (infoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } info_ = value; onChanged(); } else { infoBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ public Builder setInfo( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder builderForValue) { if (infoBuilder_ == null) { info_ = builderForValue.build(); onChanged(); } else { infoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CacheDirectiveInfoProto 
info = 1; */ public Builder mergeInfo(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto value) { if (infoBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && info_ != org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.getDefaultInstance()) { info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.newBuilder(info_).mergeFrom(value).buildPartial(); } else { info_ = value; } onChanged(); } else { infoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ public Builder clearInfo() { if (infoBuilder_ == null) { info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.getDefaultInstance(); onChanged(); } else { infoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder getInfoBuilder() { bitField0_ |= 0x00000001; onChanged(); return getInfoFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder getInfoOrBuilder() { if (infoBuilder_ != null) { return infoBuilder_.getMessageOrBuilder(); } else { return info_; } } /** * required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder> getInfoFieldBuilder() { if (infoBuilder_ == null) { infoBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder>( info_, getParentForChildren(), isClean()); info_ = null; } return infoBuilder_; } // optional uint32 cacheFlags = 2; private int cacheFlags_ ; /** * optional uint32 cacheFlags = 2; * *
       * <pre>
       * bits set using CacheFlag
       * </pre>
*/ public boolean hasCacheFlags() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional uint32 cacheFlags = 2; * *
       * <pre>
       * bits set using CacheFlag
       * </pre>
*/ public int getCacheFlags() { return cacheFlags_; } /** * optional uint32 cacheFlags = 2; * *
       * <pre>
       * bits set using CacheFlag
       * </pre>
*/ public Builder setCacheFlags(int value) { bitField0_ |= 0x00000002; cacheFlags_ = value; onChanged(); return this; } /** * optional uint32 cacheFlags = 2; * *
       * <pre>
       * bits set using CacheFlag
       * </pre>
*/ public Builder clearCacheFlags() { bitField0_ = (bitField0_ & ~0x00000002); cacheFlags_ = 0; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ModifyCacheDirectiveRequestProto) } static { defaultInstance = new ModifyCacheDirectiveRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ModifyCacheDirectiveRequestProto) } public interface ModifyCacheDirectiveResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.ModifyCacheDirectiveResponseProto} */ public static final class ModifyCacheDirectiveResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements ModifyCacheDirectiveResponseProtoOrBuilder { // Use ModifyCacheDirectiveResponseProto.newBuilder() to construct. private ModifyCacheDirectiveResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ModifyCacheDirectiveResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ModifyCacheDirectiveResponseProto defaultInstance; public static ModifyCacheDirectiveResponseProto getDefaultInstance() { return defaultInstance; } public ModifyCacheDirectiveResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ModifyCacheDirectiveResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ModifyCacheDirectiveResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ModifyCacheDirectiveResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto.class, 
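  // ---------------------------------------------------------------------
  // Editor's sketch (not part of the generated file): building the
  // ModifyCacheDirectiveRequestProto defined above. Names are the generated
  // ones; the directive id and the flag bit are illustrative only -- the
  // proto comment says cacheFlags carries "bits set using CacheFlag", and
  // 0x01 is assumed here to be the "force" bit.
  //
  //   ModifyCacheDirectiveRequestProto request =
  //       ModifyCacheDirectiveRequestProto.newBuilder()
  //           .setInfo(CacheDirectiveInfoProto.newBuilder()
  //               .setId(42L)          // hypothetical directive id to modify
  //               .build())
  //           .setCacheFlags(0x01)     // assumed CacheFlag "force" bit
  //           .build();                // build() enforces the required 'info'
  // ---------------------------------------------------------------------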
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public ModifyCacheDirectiveResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new ModifyCacheDirectiveResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ModifyCacheDirectiveResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ModifyCacheDirectiveResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ModifyCacheDirectiveResponseProto_fieldAccessorTable 
.ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ModifyCacheDirectiveResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = 
(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ModifyCacheDirectiveResponseProto) } static { defaultInstance = new ModifyCacheDirectiveResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ModifyCacheDirectiveResponseProto) } public interface RemoveCacheDirectiveRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required int64 id = 1; /** * required int64 id = 1; */ boolean hasId(); /** * required int64 id = 1; */ long getId(); } /** * Protobuf type {@code hadoop.hdfs.RemoveCacheDirectiveRequestProto} */ public static final class RemoveCacheDirectiveRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements RemoveCacheDirectiveRequestProtoOrBuilder { // Use RemoveCacheDirectiveRequestProto.newBuilder() to construct. private RemoveCacheDirectiveRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RemoveCacheDirectiveRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RemoveCacheDirectiveRequestProto defaultInstance; public static RemoveCacheDirectiveRequestProto getDefaultInstance() { return defaultInstance; } public RemoveCacheDirectiveRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RemoveCacheDirectiveRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; id_ = input.readInt64(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RemoveCacheDirectiveRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { 
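    // -------------------------------------------------------------------
    // Editor's sketch (not part of the generated file): a serialize/parse
    // round trip for RemoveCacheDirectiveRequestProto. The parsing
    // constructor above dispatches on tag 8, i.e. (field number 1 << 3) |
    // wire type 0 (varint), which is how 'required int64 id = 1' arrives.
    //
    //   RemoveCacheDirectiveRequestProto original =
    //       RemoveCacheDirectiveRequestProto.newBuilder().setId(7L).build();
    //   byte[] wire = original.toByteArray();
    //   RemoveCacheDirectiveRequestProto parsed =
    //       RemoveCacheDirectiveRequestProto.parseFrom(wire);
    //   assert parsed.getId() == 7L;
    // -------------------------------------------------------------------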
return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RemoveCacheDirectiveRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public RemoveCacheDirectiveRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new RemoveCacheDirectiveRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required int64 id = 1; public static final int ID_FIELD_NUMBER = 1; private long id_; /** * required int64 id = 1; */ public boolean hasId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 id = 1; */ public long getId() { return id_; } private void initFields() { id_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasId()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeInt64(1, id_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeInt64Size(1, id_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto) obj; boolean result = true; result = result && (hasId() == other.hasId()); if (hasId()) { result = result && (getId() == other.getId()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasId()) { hash = (37 * hash) + ID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getId()); } hash = (29 * hash) + 
getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return 
newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RemoveCacheDirectiveRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RemoveCacheDirectiveRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RemoveCacheDirectiveRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); id_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RemoveCacheDirectiveRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.id_ = id_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder 
mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto.getDefaultInstance()) return this; if (other.hasId()) { setId(other.getId()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasId()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required int64 id = 1; private long id_ ; /** * required int64 id = 1; */ public boolean hasId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 id = 1; */ public long getId() { return id_; } /** * required int64 id = 1; */ public Builder setId(long value) { bitField0_ |= 0x00000001; id_ = value; onChanged(); return this; } /** * required int64 id = 1; */ public Builder clearId() { bitField0_ = (bitField0_ & ~0x00000001); id_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RemoveCacheDirectiveRequestProto) } static { defaultInstance = new RemoveCacheDirectiveRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RemoveCacheDirectiveRequestProto) } public interface RemoveCacheDirectiveResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.RemoveCacheDirectiveResponseProto} */ public static final class RemoveCacheDirectiveResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements RemoveCacheDirectiveResponseProtoOrBuilder { // Use RemoveCacheDirectiveResponseProto.newBuilder() to construct. 
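    // -------------------------------------------------------------------
    // Editor's sketch (not part of the generated file): what the required
    // 'id' field of RemoveCacheDirectiveRequestProto (above) means for
    // callers. build() delegates to newUninitializedMessageException when
    // isInitialized() is false; buildPartial() skips the check and returns
    // the incomplete message instead.
    //
    //   try {
    //       RemoveCacheDirectiveRequestProto.newBuilder().build(); // no id set
    //   } catch (UninitializedMessageException e) { // shaded protobuf type
    //       // expected: required field 'id' was never set
    //   }
    // -------------------------------------------------------------------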
private RemoveCacheDirectiveResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RemoveCacheDirectiveResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RemoveCacheDirectiveResponseProto defaultInstance; public static RemoveCacheDirectiveResponseProto getDefaultInstance() { return defaultInstance; } public RemoveCacheDirectiveResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RemoveCacheDirectiveResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RemoveCacheDirectiveResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RemoveCacheDirectiveResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public RemoveCacheDirectiveResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new RemoveCacheDirectiveResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { 
byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public 
static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RemoveCacheDirectiveResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RemoveCacheDirectiveResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RemoveCacheDirectiveResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return 
create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RemoveCacheDirectiveResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RemoveCacheDirectiveResponseProto) } static { defaultInstance = new RemoveCacheDirectiveResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RemoveCacheDirectiveResponseProto) } public interface ListCacheDirectivesRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required int64 prevId = 1; /** * required int64 prevId = 1; */ boolean hasPrevId(); /** * required int64 prevId = 1; */ long getPrevId(); // required .hadoop.hdfs.CacheDirectiveInfoProto filter = 2; /** * required .hadoop.hdfs.CacheDirectiveInfoProto filter = 2; */ boolean hasFilter(); /** * required 
.hadoop.hdfs.CacheDirectiveInfoProto filter = 2; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto getFilter(); /** * required .hadoop.hdfs.CacheDirectiveInfoProto filter = 2; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder getFilterOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.ListCacheDirectivesRequestProto} */ public static final class ListCacheDirectivesRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements ListCacheDirectivesRequestProtoOrBuilder { // Use ListCacheDirectivesRequestProto.newBuilder() to construct. private ListCacheDirectivesRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ListCacheDirectivesRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ListCacheDirectivesRequestProto defaultInstance; public static ListCacheDirectivesRequestProto getDefaultInstance() { return defaultInstance; } public ListCacheDirectivesRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ListCacheDirectivesRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; prevId_ = input.readInt64(); break; } case 18: { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = filter_.toBuilder(); } filter_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(filter_); filter_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000002; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCacheDirectivesRequestProto_descriptor; } protected 
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCacheDirectivesRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public ListCacheDirectivesRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new ListCacheDirectivesRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required int64 prevId = 1; public static final int PREVID_FIELD_NUMBER = 1; private long prevId_; /** * required int64 prevId = 1; */ public boolean hasPrevId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 prevId = 1; */ public long getPrevId() { return prevId_; } // required .hadoop.hdfs.CacheDirectiveInfoProto filter = 2; public static final int FILTER_FIELD_NUMBER = 2; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto filter_; /** * required .hadoop.hdfs.CacheDirectiveInfoProto filter = 2; */ public boolean hasFilter() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.CacheDirectiveInfoProto filter = 2; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto getFilter() { return filter_; } /** * required .hadoop.hdfs.CacheDirectiveInfoProto filter = 2; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder getFilterOrBuilder() { return filter_; } private void initFields() { prevId_ = 0L; filter_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasPrevId()) { memoizedIsInitialized = 0; return false; } if (!hasFilter()) { memoizedIsInitialized = 0; return false; } if (!getFilter().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeInt64(1, prevId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, filter_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeInt64Size(1, prevId_); } if (((bitField0_ 
public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output)
    throws java.io.IOException {
  getSerializedSize();
  if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeInt64(1, prevId_); }
  if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, filter_); }
  getUnknownFields().writeTo(output);
}

private int memoizedSerializedSize = -1;
public int getSerializedSize() {
  int size = memoizedSerializedSize;
  if (size != -1) return size;
  size = 0;
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream.computeInt64Size(1, prevId_);
  }
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream.computeMessageSize(2, filter_);
  }
  size += getUnknownFields().getSerializedSize();
  memoizedSerializedSize = size;
  return size;
}

private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); }

@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) { return true; }
  if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto)) {
    return super.equals(obj);
  }
  org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto other =
      (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto) obj;
  boolean result = true;
  result = result && (hasPrevId() == other.hasPrevId());
  if (hasPrevId()) { result = result && (getPrevId() == other.getPrevId()); }
  result = result && (hasFilter() == other.hasFilter());
  if (hasFilter()) { result = result && getFilter().equals(other.getFilter()); }
  result = result && getUnknownFields().equals(other.getUnknownFields());
  return result;
}

private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
  if (memoizedHashCode != 0) { return memoizedHashCode; }
  int hash = 41;
  hash = (19 * hash) + getDescriptorForType().hashCode();
  if (hasPrevId()) { hash = (37 * hash) + PREVID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getPrevId()); }
  if (hasFilter()) { hash = (37 * hash) + FILTER_FIELD_NUMBER; hash = (53 * hash) + getFilter().hashCode(); }
  hash = (29 * hash) + getUnknownFields().hashCode();
  memoizedHashCode = hash;
  return hash;
}

public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto parseFrom(
    io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
    throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); }
public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto parseFrom(
    io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
    io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); }
public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto parseFrom(byte[] data)
    throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); }
public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto parseFrom(
    byte[] data,
    io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); }
public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto parseFrom(java.io.InputStream input)
    throws java.io.IOException { return PARSER.parseFrom(input); }
public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto parseFrom(
    java.io.InputStream input,
    io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); }
public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto parseDelimitedFrom(java.io.InputStream input)
    throws java.io.IOException { return PARSER.parseDelimitedFrom(input); }
public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto parseDelimitedFrom(
    java.io.InputStream input,
    io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); }
public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto parseFrom(
    io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
    throws java.io.IOException { return PARSER.parseFrom(input); }
public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto parseFrom(
    io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
    io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); }

public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto prototype) {
  return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
  Builder builder = new Builder(parent);
  return builder;
}

/** Protobuf type {@code hadoop.hdfs.ListCacheDirectivesRequestProto} */
public static final class Builder
    extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
    implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProtoOrBuilder {
  public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCacheDirectivesRequestProto_descriptor;
  }
  protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() {
    return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCacheDirectivesRequestProto_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto.class,
            org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto.Builder.class);
  }

  // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto.newBuilder()
  private Builder() { maybeForceBuilderInitialization(); }
  private Builder(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
    super(parent);
    maybeForceBuilderInitialization();
  }
  private void maybeForceBuilderInitialization() {
    if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
      getFilterFieldBuilder();
    }
  }
  private static Builder create() { return new Builder(); }

  public Builder clear() {
    super.clear();
    prevId_ = 0L;
    bitField0_ = (bitField0_ & ~0x00000001);
    if (filterBuilder_ == null) {
      filter_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.getDefaultInstance();
    } else {
      filterBuilder_.clear();
    }
    bitField0_ = (bitField0_ & ~0x00000002);
    return this;
  }
  public Builder clone() { return create().mergeFrom(buildPartial()); }
  public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
    return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCacheDirectivesRequestProto_descriptor;
  }
  public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto getDefaultInstanceForType() {
    return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto.getDefaultInstance();
  }
  public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto build() {
    org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto result = buildPartial();
    if (!result.isInitialized()) { throw newUninitializedMessageException(result); }
    return result;
  }
  public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto buildPartial() {
    org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto result =
        new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto(this);
    int from_bitField0_ = bitField0_;
    int to_bitField0_ = 0;
    if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; }
    result.prevId_ = prevId_;
    if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; }
    if (filterBuilder_ == null) { result.filter_ = filter_; } else { result.filter_ = filterBuilder_.build(); }
    result.bitField0_ = to_bitField0_;
    onBuilt();
    return result;
  }
  public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
    if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto) {
      return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto) other);
    } else {
      super.mergeFrom(other);
      return this;
    }
  }
  public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto other) {
    if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto.getDefaultInstance()) return this;
    if (other.hasPrevId()) { setPrevId(other.getPrevId()); }
    if (other.hasFilter()) { mergeFilter(other.getFilter()); }
    this.mergeUnknownFields(other.getUnknownFields());
    return this;
  }
  public final boolean isInitialized() {
    if (!hasPrevId()) { return false; }
    if (!hasFilter()) { return false; }
    if (!getFilter().isInitialized()) { return false; }
    return true;
  }
  public Builder mergeFrom(
      io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
      io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto parsedMessage = null;
    try {
      parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
    } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
      parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto) e.getUnfinishedMessage();
      throw e;
    } finally {
      if (parsedMessage != null) { mergeFrom(parsedMessage); }
    }
    return this;
  }
  private int bitField0_;

  // required int64 prevId = 1;
  private long prevId_;
  /** required int64 prevId = 1; */
  public boolean hasPrevId() { return ((bitField0_ & 0x00000001) == 0x00000001); }
  /** required int64 prevId = 1; */
  public long getPrevId() { return prevId_; }
  /** required int64 prevId = 1; */
  public Builder setPrevId(long value) {
    bitField0_ |= 0x00000001;
    prevId_ = value;
    onChanged();
    return this;
  }
  /** required int64 prevId = 1; */
  public Builder clearPrevId() {
    bitField0_ = (bitField0_ & ~0x00000001);
    prevId_ = 0L;
    onChanged();
    return this;
  }

  // required .hadoop.hdfs.CacheDirectiveInfoProto filter = 2;
  private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto filter_ =
      org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.getDefaultInstance();
  private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
      org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto,
      org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder,
      org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder> filterBuilder_;
  /** required .hadoop.hdfs.CacheDirectiveInfoProto filter = 2; */
  public boolean hasFilter() { return ((bitField0_ & 0x00000002) == 0x00000002); }
  /** required .hadoop.hdfs.CacheDirectiveInfoProto filter = 2; */
  public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto getFilter() {
    if (filterBuilder_ == null) { return filter_; } else { return filterBuilder_.getMessage(); }
  }
  /** required .hadoop.hdfs.CacheDirectiveInfoProto filter = 2; */
  public Builder setFilter(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto value) {
    if (filterBuilder_ == null) {
      if (value == null) { throw new NullPointerException(); }
      filter_ = value;
      onChanged();
    } else {
      filterBuilder_.setMessage(value);
    }
    bitField0_ |= 0x00000002;
    return this;
  }
  /** required .hadoop.hdfs.CacheDirectiveInfoProto filter = 2; */
  public Builder setFilter(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder builderForValue) {
    if (filterBuilder_ == null) {
      filter_ = builderForValue.build();
      onChanged();
    } else {
      filterBuilder_.setMessage(builderForValue.build());
    }
    bitField0_ |= 0x00000002;
    return this;
  }
  /** required .hadoop.hdfs.CacheDirectiveInfoProto filter = 2; */
  public Builder mergeFilter(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto value) {
    if (filterBuilder_ == null) {
      if (((bitField0_ & 0x00000002) == 0x00000002) &&
          filter_ != org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.getDefaultInstance()) {
        filter_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.newBuilder(filter_).mergeFrom(value).buildPartial();
      } else {
        filter_ = value;
      }
      onChanged();
    } else {
      filterBuilder_.mergeFrom(value);
    }
    bitField0_ |= 0x00000002;
    return this;
  }
  /** required .hadoop.hdfs.CacheDirectiveInfoProto filter = 2; */
  public Builder clearFilter() {
    if (filterBuilder_ == null) {
      filter_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.getDefaultInstance();
      onChanged();
    } else {
      filterBuilder_.clear();
    }
    bitField0_ = (bitField0_ & ~0x00000002);
    return this;
  }
  /** required .hadoop.hdfs.CacheDirectiveInfoProto filter = 2; */
  public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder getFilterBuilder() {
    bitField0_ |= 0x00000002;
    onChanged();
    return getFilterFieldBuilder().getBuilder();
  }
  /** required .hadoop.hdfs.CacheDirectiveInfoProto filter = 2; */
  public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder getFilterOrBuilder() {
    if (filterBuilder_ != null) { return filterBuilder_.getMessageOrBuilder(); } else { return filter_; }
  }
  /** required .hadoop.hdfs.CacheDirectiveInfoProto filter = 2; */
  private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
      org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto,
      org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder,
      org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder> getFilterFieldBuilder() {
    if (filterBuilder_ == null) {
      filterBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder>(
              filter_, getParentForChildren(), isClean());
      filter_ = null;
    }
    return filterBuilder_;
  }

  // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ListCacheDirectivesRequestProto)
}

static {
  defaultInstance = new ListCacheDirectivesRequestProto(true);
  defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ListCacheDirectivesRequestProto)
}

public interface CacheDirectiveEntryProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {
  // required .hadoop.hdfs.CacheDirectiveInfoProto info = 1;
  /** required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */
  boolean hasInfo();
  /** required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */
  org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto getInfo();
  /** required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */
  org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder getInfoOrBuilder();

  // required .hadoop.hdfs.CacheDirectiveStatsProto stats = 2;
  /** required .hadoop.hdfs.CacheDirectiveStatsProto stats = 2; */
  boolean hasStats();
  /** required .hadoop.hdfs.CacheDirectiveStatsProto stats = 2; */
  org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto getStats();
  /** required .hadoop.hdfs.CacheDirectiveStatsProto stats = 2; */
  org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProtoOrBuilder getStatsOrBuilder();
}
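// Editor's sketch (illustration, not generated code): how a caller might
// assemble the request message defined above. prevId is a resume cursor --
// 0 asks for the first batch -- and filter narrows the listing; the default
// CacheDirectiveInfoProto (all fields unset) is assumed here to behave as a
// match-everything filter. The method name is hypothetical.
private static ListCacheDirectivesRequestProto exampleFirstPage() {
  return ListCacheDirectivesRequestProto.newBuilder()
      .setPrevId(0L)  // required: start before the first directive id
      .setFilter(CacheDirectiveInfoProto.getDefaultInstance())  // required: empty filter
      .build();
}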
/** Protobuf type {@code hadoop.hdfs.CacheDirectiveEntryProto} */
public static final class CacheDirectiveEntryProto
    extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
    implements CacheDirectiveEntryProtoOrBuilder {
  // Use CacheDirectiveEntryProto.newBuilder() to construct.
  private CacheDirectiveEntryProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
    super(builder);
    this.unknownFields = builder.getUnknownFields();
  }
  private CacheDirectiveEntryProto(boolean noInit) {
    this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
  }

  private static final CacheDirectiveEntryProto defaultInstance;
  public static CacheDirectiveEntryProto getDefaultInstance() { return defaultInstance; }
  public CacheDirectiveEntryProto getDefaultInstanceForType() { return defaultInstance; }

  private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
  @java.lang.Override
  public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; }

  private CacheDirectiveEntryProto(
      io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
      io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
    initFields();
    int mutable_bitField0_ = 0;
    io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
        io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
    try {
      boolean done = false;
      while (!done) {
        int tag = input.readTag();
        switch (tag) {
          case 0:
            done = true;
            break;
          default: {
            if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; }
            break;
          }
          case 10: {
            org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder subBuilder = null;
            if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = info_.toBuilder(); }
            info_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.PARSER, extensionRegistry);
            if (subBuilder != null) { subBuilder.mergeFrom(info_); info_ = subBuilder.buildPartial(); }
            bitField0_ |= 0x00000001;
            break;
          }
          case 18: {
            org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto.Builder subBuilder = null;
            if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = stats_.toBuilder(); }
            stats_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto.PARSER, extensionRegistry);
            if (subBuilder != null) { subBuilder.mergeFrom(stats_); stats_ = subBuilder.buildPartial(); }
            bitField0_ |= 0x00000002;
            break;
          }
        }
      }
    } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
      throw e.setUnfinishedMessage(this);
    } catch (java.io.IOException e) {
      throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(e.getMessage()).setUnfinishedMessage(this);
    } finally {
      this.unknownFields = unknownFields.build();
      makeExtensionsImmutable();
    }
  }

  public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CacheDirectiveEntryProto_descriptor;
  }
  protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() {
    return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CacheDirectiveEntryProto_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto.class,
            org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto.Builder.class);
  }

  public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<CacheDirectiveEntryProto> PARSER =
      new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<CacheDirectiveEntryProto>() {
    public CacheDirectiveEntryProto parsePartialFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return new CacheDirectiveEntryProto(input, extensionRegistry);
    }
  };
  @java.lang.Override
  public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<CacheDirectiveEntryProto> getParserForType() { return PARSER; }

  private int bitField0_;
  // required .hadoop.hdfs.CacheDirectiveInfoProto info = 1;
  public static final int INFO_FIELD_NUMBER = 1;
  private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto info_;
  /** required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */
  public boolean hasInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); }
  /** required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */
  public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto getInfo() { return info_; }
  /** required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */
  public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder getInfoOrBuilder() { return info_; }

  // required .hadoop.hdfs.CacheDirectiveStatsProto stats = 2;
  public static final int STATS_FIELD_NUMBER = 2;
  private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto stats_;
  /** required .hadoop.hdfs.CacheDirectiveStatsProto stats = 2; */
  public boolean hasStats() { return ((bitField0_ & 0x00000002) == 0x00000002); }
  /** required .hadoop.hdfs.CacheDirectiveStatsProto stats = 2; */
  public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto getStats() { return stats_; }
  /** required .hadoop.hdfs.CacheDirectiveStatsProto stats = 2; */
  public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProtoOrBuilder getStatsOrBuilder() { return stats_; }

  private void initFields() {
    info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.getDefaultInstance();
    stats_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto.getDefaultInstance();
  }
  private byte memoizedIsInitialized = -1;
  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized != -1) return isInitialized == 1;
    if (!hasInfo()) { memoizedIsInitialized = 0; return false; }
    if (!hasStats()) { memoizedIsInitialized = 0; return false; }
    if (!getInfo().isInitialized()) { memoizedIsInitialized = 0; return false; }
    if (!getStats().isInitialized()) { memoizedIsInitialized = 0; return false; }
    memoizedIsInitialized = 1;
    return true;
  }

  public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
    getSerializedSize();
    if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, info_); }
    if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, stats_); }
    getUnknownFields().writeTo(output);
  }

  private int memoizedSerializedSize = -1;
  public int getSerializedSize() {
    int size = memoizedSerializedSize;
    if (size != -1) return size;
    size = 0;
    if (((bitField0_ & 0x00000001) == 0x00000001)) {
      size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream.computeMessageSize(1, info_);
    }
    if (((bitField0_ & 0x00000002) == 0x00000002)) {
      size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream.computeMessageSize(2, stats_);
    }
    size += getUnknownFields().getSerializedSize();
    memoizedSerializedSize = size;
    return size;
  }

  private static final long serialVersionUID = 0L;
  @java.lang.Override
  protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); }

  @java.lang.Override
  public boolean equals(final java.lang.Object obj) {
    if (obj == this) { return true; }
    if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto)) { return super.equals(obj); }
    org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto other =
        (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto) obj;
    boolean result = true;
    result = result && (hasInfo() == other.hasInfo());
    if (hasInfo()) { result = result && getInfo().equals(other.getInfo()); }
    result = result && (hasStats() == other.hasStats());
    if (hasStats()) { result = result && getStats().equals(other.getStats()); }
    result = result && getUnknownFields().equals(other.getUnknownFields());
    return result;
  }

  private int memoizedHashCode = 0;
  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) { return memoizedHashCode; }
    int hash = 41;
    hash = (19 * hash) + getDescriptorForType().hashCode();
    if (hasInfo()) { hash = (37 * hash) + INFO_FIELD_NUMBER; hash = (53 * hash) + getInfo().hashCode(); }
    if (hasStats()) { hash = (37 * hash) + STATS_FIELD_NUMBER; hash = (53 * hash) + getStats().hashCode(); }
    hash = (29 * hash) + getUnknownFields().hashCode();
    memoizedHashCode = hash;
    return hash;
  }

  public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto parseFrom(
      io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data)
      throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); }
  public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto parseFrom(
      io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data,
      io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); }
  public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto parseFrom(byte[] data)
      throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); }
  public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto parseFrom(
      byte[] data,
      io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); }
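  // Editor's sketch (illustration, not generated code): the wire round-trip
  // the overloads above support. toByteArray() is inherited from the protobuf
  // AbstractMessageLite base class; a parsed copy compares equal to the input.
  private static CacheDirectiveEntryProto exampleRoundTrip(CacheDirectiveEntryProto entry)
      throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
    byte[] wire = entry.toByteArray();                // serialized via writeTo()/getSerializedSize()
    return CacheDirectiveEntryProto.parseFrom(wire);  // parse back with the byte[] overload above
  }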
  public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto parseFrom(java.io.InputStream input)
      throws java.io.IOException { return PARSER.parseFrom(input); }
  public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto parseFrom(
      java.io.InputStream input,
      io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); }
  public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto parseDelimitedFrom(java.io.InputStream input)
      throws java.io.IOException { return PARSER.parseDelimitedFrom(input); }
  public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto parseDelimitedFrom(
      java.io.InputStream input,
      io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); }
  public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto parseFrom(
      io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input)
      throws java.io.IOException { return PARSER.parseFrom(input); }
  public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto parseFrom(
      io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
      io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); }

  public static Builder newBuilder() { return Builder.create(); }
  public Builder newBuilderForType() { return newBuilder(); }
  public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto prototype) {
    return newBuilder().mergeFrom(prototype);
  }
  public Builder toBuilder() { return newBuilder(this); }
  @java.lang.Override
  protected Builder newBuilderForType(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
    Builder builder = new Builder(parent);
    return builder;
  }

  /** Protobuf type {@code hadoop.hdfs.CacheDirectiveEntryProto} */
  public static final class Builder
      extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<Builder>
      implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProtoOrBuilder {
    public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CacheDirectiveEntryProto_descriptor;
    }
    protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CacheDirectiveEntryProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto.class,
              org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto.Builder.class);
    }

    // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto.newBuilder()
    private Builder() { maybeForceBuilderInitialization(); }
    private Builder(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      super(parent);
      maybeForceBuilderInitialization();
    }
    private void maybeForceBuilderInitialization() {
      if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        getInfoFieldBuilder();
        getStatsFieldBuilder();
      }
    }
    private static Builder create() { return new Builder(); }

    public Builder clear() {
      super.clear();
      if (infoBuilder_ == null) {
        info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.getDefaultInstance();
      } else {
        infoBuilder_.clear();
      }
      bitField0_ = (bitField0_ & ~0x00000001);
      if (statsBuilder_ == null) {
        stats_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto.getDefaultInstance();
      } else {
        statsBuilder_.clear();
      }
      bitField0_ = (bitField0_ & ~0x00000002);
      return this;
    }
    public Builder clone() { return create().mergeFrom(buildPartial()); }
    public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
      return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CacheDirectiveEntryProto_descriptor;
    }
    public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto getDefaultInstanceForType() {
      return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto.getDefaultInstance();
    }
    public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto build() {
      org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto result = buildPartial();
      if (!result.isInitialized()) { throw newUninitializedMessageException(result); }
      return result;
    }
    public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto buildPartial() {
      org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto result =
          new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto(this);
      int from_bitField0_ = bitField0_;
      int to_bitField0_ = 0;
      if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; }
      if (infoBuilder_ == null) { result.info_ = info_; } else { result.info_ = infoBuilder_.build(); }
      if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; }
      if (statsBuilder_ == null) { result.stats_ = stats_; } else { result.stats_ = statsBuilder_.build(); }
      result.bitField0_ = to_bitField0_;
      onBuilt();
      return result;
    }
    public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) {
      if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto) {
        return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto) other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }
    public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto other) {
      if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto.getDefaultInstance()) return this;
      if (other.hasInfo()) { mergeInfo(other.getInfo()); }
      if (other.hasStats()) { mergeStats(other.getStats()); }
      this.mergeUnknownFields(other.getUnknownFields());
      return this;
    }
    public final boolean isInitialized() {
      if (!hasInfo()) { return false; }
      if (!hasStats()) { return false; }
      if (!getInfo().isInitialized()) { return false; }
      if (!getStats().isInitialized()) { return false; }
      return true;
    }
    public Builder mergeFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto parsedMessage = null;
      try {
        parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
      } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
        parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto) e.getUnfinishedMessage();
        throw e;
      } finally {
        if (parsedMessage != null) { mergeFrom(parsedMessage); }
      }
      return this;
    }
    private int bitField0_;

    // required .hadoop.hdfs.CacheDirectiveInfoProto info = 1;
    private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto info_ =
        org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.getDefaultInstance();
    private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
        org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto,
        org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder,
        org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder> infoBuilder_;
    /** required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */
    public boolean hasInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); }
    /** required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */
    public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto getInfo() {
      if (infoBuilder_ == null) { return info_; } else { return infoBuilder_.getMessage(); }
    }
    /** required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */
    public Builder setInfo(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto value) {
      if (infoBuilder_ == null) {
        if (value == null) { throw new NullPointerException(); }
        info_ = value;
        onChanged();
      } else {
        infoBuilder_.setMessage(value);
      }
      bitField0_ |= 0x00000001;
      return this;
    }
    /** required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */
    public Builder setInfo(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder builderForValue) {
      if (infoBuilder_ == null) {
        info_ = builderForValue.build();
        onChanged();
      } else {
        infoBuilder_.setMessage(builderForValue.build());
      }
      bitField0_ |= 0x00000001;
      return this;
    }
    /** required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */
    public Builder mergeInfo(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto value) {
      if (infoBuilder_ == null) {
        if (((bitField0_ & 0x00000001) == 0x00000001) &&
            info_ != org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.getDefaultInstance()) {
          info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.newBuilder(info_).mergeFrom(value).buildPartial();
        } else {
          info_ = value;
        }
        onChanged();
      } else {
        infoBuilder_.mergeFrom(value);
      }
      bitField0_ |= 0x00000001;
      return this;
    }
    /** required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */
    public Builder clearInfo() {
      if (infoBuilder_ == null) {
        info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.getDefaultInstance();
        onChanged();
      } else {
        infoBuilder_.clear();
      }
      bitField0_ = (bitField0_ & ~0x00000001);
      return this;
    }
    /** required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */
    public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder getInfoBuilder() {
      bitField0_ |= 0x00000001;
      onChanged();
      return getInfoFieldBuilder().getBuilder();
    }
    /** required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */
    public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder getInfoOrBuilder() {
      if (infoBuilder_ != null) { return infoBuilder_.getMessageOrBuilder(); } else { return info_; }
    }
    /** required .hadoop.hdfs.CacheDirectiveInfoProto info = 1; */
    private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
        org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto,
        org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder,
        org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder> getInfoFieldBuilder() {
      if (infoBuilder_ == null) {
        infoBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
            org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto,
            org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto.Builder,
            org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProtoOrBuilder>(
                info_, getParentForChildren(), isClean());
        info_ = null;
      }
      return infoBuilder_;
    }

    // required .hadoop.hdfs.CacheDirectiveStatsProto stats = 2;
    private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto stats_ =
        org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto.getDefaultInstance();
    private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
        org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto,
        org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto.Builder,
        org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProtoOrBuilder> statsBuilder_;
    /** required .hadoop.hdfs.CacheDirectiveStatsProto stats = 2; */
    public boolean hasStats() { return ((bitField0_ & 0x00000002) == 0x00000002); }
    /** required .hadoop.hdfs.CacheDirectiveStatsProto stats = 2; */
    public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto getStats() {
      if (statsBuilder_ == null) { return stats_; } else { return statsBuilder_.getMessage(); }
    }
    /** required .hadoop.hdfs.CacheDirectiveStatsProto stats = 2; */
    public Builder setStats(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto value) {
      if (statsBuilder_ == null) {
        if (value == null) { throw new NullPointerException(); }
        stats_ = value;
        onChanged();
      } else {
        statsBuilder_.setMessage(value);
      }
      bitField0_ |= 0x00000002;
      return this;
    }
    /** required .hadoop.hdfs.CacheDirectiveStatsProto stats = 2; */
    public Builder setStats(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto.Builder builderForValue) {
      if (statsBuilder_ == null) {
        stats_ = builderForValue.build();
        onChanged();
      } else {
        statsBuilder_.setMessage(builderForValue.build());
      }
      bitField0_ |= 0x00000002;
      return this;
    }
    /** required .hadoop.hdfs.CacheDirectiveStatsProto stats = 2; */
    public Builder mergeStats(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto value) {
      if (statsBuilder_ == null) {
        if (((bitField0_ & 0x00000002) == 0x00000002) &&
            stats_ != org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto.getDefaultInstance()) {
          stats_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto.newBuilder(stats_).mergeFrom(value).buildPartial();
        } else {
          stats_ = value;
        }
        onChanged();
      } else {
        statsBuilder_.mergeFrom(value);
      }
      bitField0_ |= 0x00000002;
      return this;
    }
    /** required .hadoop.hdfs.CacheDirectiveStatsProto stats = 2; */
    public Builder clearStats() {
      if (statsBuilder_ == null) {
        stats_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto.getDefaultInstance();
        onChanged();
      } else {
        statsBuilder_.clear();
      }
      bitField0_ = (bitField0_ & ~0x00000002);
      return this;
    }
    /** required .hadoop.hdfs.CacheDirectiveStatsProto stats = 2; */
    public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto.Builder getStatsBuilder() {
      bitField0_ |= 0x00000002;
      onChanged();
      return getStatsFieldBuilder().getBuilder();
    }
    /** required .hadoop.hdfs.CacheDirectiveStatsProto stats = 2; */
    public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProtoOrBuilder getStatsOrBuilder() {
      if (statsBuilder_ != null) { return statsBuilder_.getMessageOrBuilder(); } else { return stats_; }
    }
    /** required .hadoop.hdfs.CacheDirectiveStatsProto stats = 2; */
    private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
        org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto,
        org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto.Builder,
        org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProtoOrBuilder> getStatsFieldBuilder() {
      if (statsBuilder_ == null) {
        statsBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
            org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto,
            org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto.Builder,
            org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProtoOrBuilder>(
                stats_, getParentForChildren(), isClean());
        stats_ = null;
      }
      return statsBuilder_;
    }

    // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CacheDirectiveEntryProto)
  }

  static {
    defaultInstance = new CacheDirectiveEntryProto(true);
    defaultInstance.initFields();
  }
  // @@protoc_insertion_point(class_scope:hadoop.hdfs.CacheDirectiveEntryProto)
}
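// Editor's sketch (illustration, not generated code): consuming one listing
// entry. Both fields are 'required', so on a parsed, initialized entry the
// has-checks are expected to pass; they are kept to mirror defensive callers.
// The method name is hypothetical.
private static void exampleReadEntry(CacheDirectiveEntryProto entry) {
  if (entry.hasInfo()) {
    CacheDirectiveInfoProto info = entry.getInfo();    // the directive as configured
  }
  if (entry.hasStats()) {
    CacheDirectiveStatsProto stats = entry.getStats(); // usage reported for the directive
  }
}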
public interface ListCacheDirectivesResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {
  // repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1;
  /** repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */
  java.util.List<org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto> getElementsList();
  /** repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */
  org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto getElements(int index);
  /** repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */
  int getElementsCount();
  /** repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */
  java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProtoOrBuilder> getElementsOrBuilderList();
  /** repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */
  org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProtoOrBuilder getElementsOrBuilder(int index);

  // required bool hasMore = 2;
  /** required bool hasMore = 2; */
  boolean hasHasMore();
  /** required bool hasMore = 2; */
  boolean getHasMore();
}

/** Protobuf type {@code hadoop.hdfs.ListCacheDirectivesResponseProto} */
public static final class ListCacheDirectivesResponseProto
    extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
    implements ListCacheDirectivesResponseProtoOrBuilder {
  // Use ListCacheDirectivesResponseProto.newBuilder() to construct.
  private ListCacheDirectivesResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
    super(builder);
    this.unknownFields = builder.getUnknownFields();
  }
  private ListCacheDirectivesResponseProto(boolean noInit) {
    this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
  }

  private static final ListCacheDirectivesResponseProto defaultInstance;
  public static ListCacheDirectivesResponseProto getDefaultInstance() { return defaultInstance; }
  public ListCacheDirectivesResponseProto getDefaultInstanceForType() { return defaultInstance; }

  private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
  @java.lang.Override
  public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; }

  private ListCacheDirectivesResponseProto(
      io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
      io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
    initFields();
    int mutable_bitField0_ = 0;
    io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
        io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
    try {
      boolean done = false;
      while (!done) {
        int tag = input.readTag();
        switch (tag) {
          case 0:
            done = true;
            break;
          default: {
            if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; }
            break;
          }
          case 10: {
            if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
              elements_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto>();
              mutable_bitField0_ |= 0x00000001;
            }
            elements_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto.PARSER, extensionRegistry));
            break;
          }
          case 16: {
            bitField0_ |= 0x00000001;
            hasMore_ = input.readBool();
            break;
          }
        }
      }
    } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
      throw e.setUnfinishedMessage(this);
    } catch (java.io.IOException e) {
      throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(e.getMessage()).setUnfinishedMessage(this);
    } finally {
      if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
        elements_ = java.util.Collections.unmodifiableList(elements_);
      }
      this.unknownFields = unknownFields.build();
      makeExtensionsImmutable();
    }
  }

  public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCacheDirectivesResponseProto_descriptor;
  }
  protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() {
    return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCacheDirectivesResponseProto_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto.class,
            org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto.Builder.class);
  }

  public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<ListCacheDirectivesResponseProto> PARSER =
      new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<ListCacheDirectivesResponseProto>() {
    public ListCacheDirectivesResponseProto parsePartialFrom(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      return new ListCacheDirectivesResponseProto(input, extensionRegistry);
    }
  };
  @java.lang.Override
  public io.prestosql.hadoop.$internal.com.google.protobuf.Parser<ListCacheDirectivesResponseProto> getParserForType() { return PARSER; }

  private int bitField0_;
  // repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1;
  public static final int ELEMENTS_FIELD_NUMBER = 1;
  private java.util.List<org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto> elements_;
  /** repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */
  public java.util.List<org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto> getElementsList() { return elements_; }
  /** repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */
  public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProtoOrBuilder> getElementsOrBuilderList() { return elements_; }
  /** repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */
  public int getElementsCount() { return elements_.size(); }
  /** repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */
  public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto getElements(int index) { return elements_.get(index); }
  /** repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */
  public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProtoOrBuilder getElementsOrBuilder(int index) { return elements_.get(index); }

  // required bool hasMore = 2;
  public static final int HASMORE_FIELD_NUMBER = 2;
  private boolean hasMore_;
  /** required bool hasMore = 2; */
  public boolean hasHasMore() { return ((bitField0_ & 0x00000001) == 0x00000001); }
  /** required bool hasMore = 2; */
  public boolean getHasMore() { return hasMore_; }

  private void initFields() {
    elements_ = java.util.Collections.emptyList();
    hasMore_ = false;
  }
  private byte memoizedIsInitialized = -1;
  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized != -1) return isInitialized == 1;
    if (!hasHasMore()) { memoizedIsInitialized = 0; return false; }
    for (int i = 0; i < getElementsCount(); i++) {
      if (!getElements(i).isInitialized()) { memoizedIsInitialized = 0; return false; }
    }
    memoizedIsInitialized = 1;
    return true;
  }

  public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
    getSerializedSize();
    for (int i = 0; i < elements_.size(); i++) { output.writeMessage(1, elements_.get(i)); }
    if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBool(2, hasMore_); }
    getUnknownFields().writeTo(output);
  }
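  // Editor's sketch (illustration, not generated code): the pagination
  // contract these two messages implement together. 'rpc' stands in for the
  // actual listCacheDirectives call on the NameNode protocol (hypothetical
  // parameter, not part of this file), and the id accessor on
  // CacheDirectiveInfoProto is assumed from its definition elsewhere in this
  // file. A production caller would also guard against an empty page.
  private static void exampleListAll(
      java.util.function.Function<ListCacheDirectivesRequestProto, ListCacheDirectivesResponseProto> rpc,
      CacheDirectiveInfoProto filter) {
    long prevId = 0L;                                  // cursor: last directive id seen
    ListCacheDirectivesResponseProto resp;
    do {
      resp = rpc.apply(ListCacheDirectivesRequestProto.newBuilder()
          .setPrevId(prevId)
          .setFilter(filter)
          .build());
      for (CacheDirectiveEntryProto e : resp.getElementsList()) {
        prevId = e.getInfo().getId();                  // advance the cursor
        // ... consume e.getInfo() / e.getStats() here ...
      }
    } while (resp.getHasMore());                       // 'required bool hasMore = 2'
  }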
size = 0; for (int i = 0; i < elements_.size(); i++) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, elements_.get(i)); } if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(2, hasMore_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto) obj; boolean result = true; result = result && getElementsList() .equals(other.getElementsList()); result = result && (hasHasMore() == other.hasHasMore()); if (hasHasMore()) { result = result && (getHasMore() == other.getHasMore()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (getElementsCount() > 0) { hash = (37 * hash) + ELEMENTS_FIELD_NUMBER; hash = (53 * hash) + getElementsList().hashCode(); } if (hasHasMore()) { hash = (37 * hash) + HASMORE_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getHasMore()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ListCacheDirectivesResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCacheDirectivesResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCacheDirectivesResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); 
maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getElementsFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (elementsBuilder_ == null) { elements_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { elementsBuilder_.clear(); } hasMore_ = false; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCacheDirectivesResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (elementsBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { elements_ = java.util.Collections.unmodifiableList(elements_); bitField0_ = (bitField0_ & ~0x00000001); } result.elements_ = elements_; } else { result.elements_ = elementsBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000001; } result.hasMore_ = hasMore_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto.getDefaultInstance()) return this; if (elementsBuilder_ == null) { if (!other.elements_.isEmpty()) { if (elements_.isEmpty()) { elements_ = other.elements_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureElementsIsMutable(); elements_.addAll(other.elements_); } onChanged(); } } else { if (!other.elements_.isEmpty()) { if (elementsBuilder_.isEmpty()) { elementsBuilder_.dispose(); elementsBuilder_ = null; elements_ = other.elements_; bitField0_ = (bitField0_ & ~0x00000001); elementsBuilder_ = 
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getElementsFieldBuilder() : null; } else { elementsBuilder_.addAllMessages(other.elements_); } } } if (other.hasHasMore()) { setHasMore(other.getHasMore()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasHasMore()) { return false; } for (int i = 0; i < getElementsCount(); i++) { if (!getElements(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; private java.util.List elements_ = java.util.Collections.emptyList(); private void ensureElementsIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { elements_ = new java.util.ArrayList(elements_); bitField0_ |= 0x00000001; } } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProtoOrBuilder> elementsBuilder_; /** * repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */ public java.util.List getElementsList() { if (elementsBuilder_ == null) { return java.util.Collections.unmodifiableList(elements_); } else { return elementsBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */ public int getElementsCount() { if (elementsBuilder_ == null) { return elements_.size(); } else { return elementsBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto getElements(int index) { if (elementsBuilder_ == null) { return elements_.get(index); } else { return elementsBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */ public Builder setElements( int index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto value) { if (elementsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureElementsIsMutable(); elements_.set(index, value); onChanged(); } else { elementsBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */ public Builder setElements( int index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto.Builder builderForValue) { if (elementsBuilder_ == null) { ensureElementsIsMutable(); elements_.set(index, builderForValue.build()); onChanged(); } else { 
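          /*
           * Illustrative sketch, not part of the generated file: populating the
           * repeated `elements` field through the builder methods defined here.
           * `entry` stands for an already-built CacheDirectiveEntryProto and is
           * hypothetical.
           *
           *   ListCacheDirectivesResponseProto.Builder b =
           *       ListCacheDirectivesResponseProto.newBuilder();
           *   b.addElements(entry);                            // append one entry
           *   b.addAllElements(java.util.Arrays.asList(entry, entry));
           *   int n = b.getElementsCount();
           *   b.removeElements(n - 1);                         // drop the last one
           */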
elementsBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */ public Builder addElements(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto value) { if (elementsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureElementsIsMutable(); elements_.add(value); onChanged(); } else { elementsBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */ public Builder addElements( int index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto value) { if (elementsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureElementsIsMutable(); elements_.add(index, value); onChanged(); } else { elementsBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */ public Builder addElements( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto.Builder builderForValue) { if (elementsBuilder_ == null) { ensureElementsIsMutable(); elements_.add(builderForValue.build()); onChanged(); } else { elementsBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */ public Builder addElements( int index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto.Builder builderForValue) { if (elementsBuilder_ == null) { ensureElementsIsMutable(); elements_.add(index, builderForValue.build()); onChanged(); } else { elementsBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */ public Builder addAllElements( java.lang.Iterable values) { if (elementsBuilder_ == null) { ensureElementsIsMutable(); super.addAll(values, elements_); onChanged(); } else { elementsBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */ public Builder clearElements() { if (elementsBuilder_ == null) { elements_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { elementsBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */ public Builder removeElements(int index) { if (elementsBuilder_ == null) { ensureElementsIsMutable(); elements_.remove(index); onChanged(); } else { elementsBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto.Builder getElementsBuilder( int index) { return getElementsFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProtoOrBuilder getElementsOrBuilder( int index) { if (elementsBuilder_ == null) { return elements_.get(index); } else { return elementsBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */ public java.util.List getElementsOrBuilderList() { if (elementsBuilder_ != null) { return elementsBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(elements_); } } /** * repeated 
.hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto.Builder addElementsBuilder() { return getElementsFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto.Builder addElementsBuilder( int index) { return getElementsFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.CacheDirectiveEntryProto elements = 1; */ public java.util.List getElementsBuilderList() { return getElementsFieldBuilder().getBuilderList(); } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProtoOrBuilder> getElementsFieldBuilder() { if (elementsBuilder_ == null) { elementsBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProtoOrBuilder>( elements_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); elements_ = null; } return elementsBuilder_; } // required bool hasMore = 2; private boolean hasMore_ ; /** * required bool hasMore = 2; */ public boolean hasHasMore() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required bool hasMore = 2; */ public boolean getHasMore() { return hasMore_; } /** * required bool hasMore = 2; */ public Builder setHasMore(boolean value) { bitField0_ |= 0x00000002; hasMore_ = value; onChanged(); return this; } /** * required bool hasMore = 2; */ public Builder clearHasMore() { bitField0_ = (bitField0_ & ~0x00000002); hasMore_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ListCacheDirectivesResponseProto) } static { defaultInstance = new ListCacheDirectivesResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ListCacheDirectivesResponseProto) } public interface CachePoolInfoProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // optional string poolName = 1; /** * optional string poolName = 1; */ boolean hasPoolName(); /** * optional string poolName = 1; */ java.lang.String getPoolName(); /** * optional string poolName = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPoolNameBytes(); // optional string ownerName = 2; /** * optional string ownerName = 2; */ boolean hasOwnerName(); /** * optional string ownerName = 2; */ java.lang.String getOwnerName(); /** * optional string ownerName = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getOwnerNameBytes(); // optional string groupName = 3; /** * optional string groupName = 3; */ boolean hasGroupName(); /** * optional string groupName = 3; */ 
java.lang.String getGroupName(); /** * optional string groupName = 3; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getGroupNameBytes(); // optional int32 mode = 4; /** * optional int32 mode = 4; */ boolean hasMode(); /** * optional int32 mode = 4; */ int getMode(); // optional int64 limit = 5; /** * optional int64 limit = 5; */ boolean hasLimit(); /** * optional int64 limit = 5; */ long getLimit(); // optional int64 maxRelativeExpiry = 6; /** * optional int64 maxRelativeExpiry = 6; */ boolean hasMaxRelativeExpiry(); /** * optional int64 maxRelativeExpiry = 6; */ long getMaxRelativeExpiry(); // optional uint32 defaultReplication = 7 [default = 1]; /** * optional uint32 defaultReplication = 7 [default = 1]; */ boolean hasDefaultReplication(); /** * optional uint32 defaultReplication = 7 [default = 1]; */ int getDefaultReplication(); } /** * Protobuf type {@code hadoop.hdfs.CachePoolInfoProto} */ public static final class CachePoolInfoProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements CachePoolInfoProtoOrBuilder { // Use CachePoolInfoProto.newBuilder() to construct. private CachePoolInfoProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private CachePoolInfoProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final CachePoolInfoProto defaultInstance; public static CachePoolInfoProto getDefaultInstance() { return defaultInstance; } public CachePoolInfoProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CachePoolInfoProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; poolName_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; ownerName_ = input.readBytes(); break; } case 26: { bitField0_ |= 0x00000004; groupName_ = input.readBytes(); break; } case 32: { bitField0_ |= 0x00000008; mode_ = input.readInt32(); break; } case 40: { bitField0_ |= 0x00000010; limit_ = input.readInt64(); break; } case 48: { bitField0_ |= 0x00000020; maxRelativeExpiry_ = input.readInt64(); break; } case 56: { bitField0_ |= 0x00000040; defaultReplication_ = input.readUInt32(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = 
unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CachePoolInfoProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CachePoolInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public CachePoolInfoProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new CachePoolInfoProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // optional string poolName = 1; public static final int POOLNAME_FIELD_NUMBER = 1; private java.lang.Object poolName_; /** * optional string poolName = 1; */ public boolean hasPoolName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string poolName = 1; */ public java.lang.String getPoolName() { java.lang.Object ref = poolName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { poolName_ = s; } return s; } } /** * optional string poolName = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPoolNameBytes() { java.lang.Object ref = poolName_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); poolName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // optional string ownerName = 2; public static final int OWNERNAME_FIELD_NUMBER = 2; private java.lang.Object ownerName_; /** * optional string ownerName = 2; */ public boolean hasOwnerName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string ownerName = 2; */ public java.lang.String getOwnerName() { java.lang.Object ref = ownerName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { ownerName_ = s; } return s; } } /** * optional string ownerName = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getOwnerNameBytes() { java.lang.Object ref = ownerName_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = 
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ownerName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // optional string groupName = 3; public static final int GROUPNAME_FIELD_NUMBER = 3; private java.lang.Object groupName_; /** * optional string groupName = 3; */ public boolean hasGroupName() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional string groupName = 3; */ public java.lang.String getGroupName() { java.lang.Object ref = groupName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { groupName_ = s; } return s; } } /** * optional string groupName = 3; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getGroupNameBytes() { java.lang.Object ref = groupName_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); groupName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // optional int32 mode = 4; public static final int MODE_FIELD_NUMBER = 4; private int mode_; /** * optional int32 mode = 4; */ public boolean hasMode() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional int32 mode = 4; */ public int getMode() { return mode_; } // optional int64 limit = 5; public static final int LIMIT_FIELD_NUMBER = 5; private long limit_; /** * optional int64 limit = 5; */ public boolean hasLimit() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional int64 limit = 5; */ public long getLimit() { return limit_; } // optional int64 maxRelativeExpiry = 6; public static final int MAXRELATIVEEXPIRY_FIELD_NUMBER = 6; private long maxRelativeExpiry_; /** * optional int64 maxRelativeExpiry = 6; */ public boolean hasMaxRelativeExpiry() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional int64 maxRelativeExpiry = 6; */ public long getMaxRelativeExpiry() { return maxRelativeExpiry_; } // optional uint32 defaultReplication = 7 [default = 1]; public static final int DEFAULTREPLICATION_FIELD_NUMBER = 7; private int defaultReplication_; /** * optional uint32 defaultReplication = 7 [default = 1]; */ public boolean hasDefaultReplication() { return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional uint32 defaultReplication = 7 [default = 1]; */ public int getDefaultReplication() { return defaultReplication_; } private void initFields() { poolName_ = ""; ownerName_ = ""; groupName_ = ""; mode_ = 0; limit_ = 0L; maxRelativeExpiry_ = 0L; defaultReplication_ = 1; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getPoolNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getOwnerNameBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, 
getGroupNameBytes()); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeInt32(4, mode_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeInt64(5, limit_); } if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeInt64(6, maxRelativeExpiry_); } if (((bitField0_ & 0x00000040) == 0x00000040)) { output.writeUInt32(7, defaultReplication_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getPoolNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(2, getOwnerNameBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(3, getGroupNameBytes()); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeInt32Size(4, mode_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeInt64Size(5, limit_); } if (((bitField0_ & 0x00000020) == 0x00000020)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeInt64Size(6, maxRelativeExpiry_); } if (((bitField0_ & 0x00000040) == 0x00000040)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt32Size(7, defaultReplication_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto) obj; boolean result = true; result = result && (hasPoolName() == other.hasPoolName()); if (hasPoolName()) { result = result && getPoolName() .equals(other.getPoolName()); } result = result && (hasOwnerName() == other.hasOwnerName()); if (hasOwnerName()) { result = result && getOwnerName() .equals(other.getOwnerName()); } result = result && (hasGroupName() == other.hasGroupName()); if (hasGroupName()) { result = result && getGroupName() .equals(other.getGroupName()); } result = result && (hasMode() == other.hasMode()); if (hasMode()) { result = result && (getMode() == other.getMode()); } result = result && (hasLimit() == other.hasLimit()); if (hasLimit()) { result = result && (getLimit() == other.getLimit()); } result = result && (hasMaxRelativeExpiry() == other.hasMaxRelativeExpiry()); if (hasMaxRelativeExpiry()) { result = result && (getMaxRelativeExpiry() == other.getMaxRelativeExpiry()); } result = result && (hasDefaultReplication() == other.hasDefaultReplication()); if (hasDefaultReplication()) { result = result && (getDefaultReplication() == other.getDefaultReplication()); } result = result && 
getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasPoolName()) { hash = (37 * hash) + POOLNAME_FIELD_NUMBER; hash = (53 * hash) + getPoolName().hashCode(); } if (hasOwnerName()) { hash = (37 * hash) + OWNERNAME_FIELD_NUMBER; hash = (53 * hash) + getOwnerName().hashCode(); } if (hasGroupName()) { hash = (37 * hash) + GROUPNAME_FIELD_NUMBER; hash = (53 * hash) + getGroupName().hashCode(); } if (hasMode()) { hash = (37 * hash) + MODE_FIELD_NUMBER; hash = (53 * hash) + getMode(); } if (hasLimit()) { hash = (37 * hash) + LIMIT_FIELD_NUMBER; hash = (53 * hash) + hashLong(getLimit()); } if (hasMaxRelativeExpiry()) { hash = (37 * hash) + MAXRELATIVEEXPIRY_FIELD_NUMBER; hash = (53 * hash) + hashLong(getMaxRelativeExpiry()); } if (hasDefaultReplication()) { hash = (37 * hash) + DEFAULTREPLICATION_FIELD_NUMBER; hash = (53 * hash) + getDefaultReplication(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); 
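    /*
     * Illustrative sketch, not part of the generated file: a length-delimited
     * round trip for CachePoolInfoProto using the static methods above. The
     * streams `out` and `in` are hypothetical; every field of this message is
     * optional, so even an empty builder yields a valid message.
     *
     *   CachePoolInfoProto info = CachePoolInfoProto.newBuilder()
     *       .setPoolName("pool1")
     *       .setLimit(64L * 1024 * 1024)   // cache limit in bytes
     *       .build();
     *   info.writeDelimitedTo(out);
     *   CachePoolInfoProto back = CachePoolInfoProto.parseDelimitedFrom(in);
     */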
} public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.CachePoolInfoProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CachePoolInfoProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CachePoolInfoProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); poolName_ = ""; bitField0_ = (bitField0_ & ~0x00000001); ownerName_ = ""; bitField0_ = (bitField0_ & ~0x00000002); groupName_ = ""; bitField0_ = (bitField0_ & ~0x00000004); mode_ = 0; bitField0_ = (bitField0_ & ~0x00000008); limit_ = 0L; bitField0_ = (bitField0_ & ~0x00000010); maxRelativeExpiry_ = 0L; bitField0_ = (bitField0_ & ~0x00000020); defaultReplication_ = 1; bitField0_ = (bitField0_ & ~0x00000040); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CachePoolInfoProto_descriptor; } public 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.poolName_ = poolName_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.ownerName_ = ownerName_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.groupName_ = groupName_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.mode_ = mode_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } result.limit_ = limit_; if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000020; } result.maxRelativeExpiry_ = maxRelativeExpiry_; if (((from_bitField0_ & 0x00000040) == 0x00000040)) { to_bitField0_ |= 0x00000040; } result.defaultReplication_ = defaultReplication_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.getDefaultInstance()) return this; if (other.hasPoolName()) { bitField0_ |= 0x00000001; poolName_ = other.poolName_; onChanged(); } if (other.hasOwnerName()) { bitField0_ |= 0x00000002; ownerName_ = other.ownerName_; onChanged(); } if (other.hasGroupName()) { bitField0_ |= 0x00000004; groupName_ = other.groupName_; onChanged(); } if (other.hasMode()) { setMode(other.getMode()); } if (other.hasLimit()) { setLimit(other.getLimit()); } if (other.hasMaxRelativeExpiry()) { setMaxRelativeExpiry(other.getMaxRelativeExpiry()); } if (other.hasDefaultReplication()) { setDefaultReplication(other.getDefaultReplication()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch 
(io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // optional string poolName = 1; private java.lang.Object poolName_ = ""; /** * optional string poolName = 1; */ public boolean hasPoolName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string poolName = 1; */ public java.lang.String getPoolName() { java.lang.Object ref = poolName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); poolName_ = s; return s; } else { return (java.lang.String) ref; } } /** * optional string poolName = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPoolNameBytes() { java.lang.Object ref = poolName_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); poolName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * optional string poolName = 1; */ public Builder setPoolName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; poolName_ = value; onChanged(); return this; } /** * optional string poolName = 1; */ public Builder clearPoolName() { bitField0_ = (bitField0_ & ~0x00000001); poolName_ = getDefaultInstance().getPoolName(); onChanged(); return this; } /** * optional string poolName = 1; */ public Builder setPoolNameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; poolName_ = value; onChanged(); return this; } // optional string ownerName = 2; private java.lang.Object ownerName_ = ""; /** * optional string ownerName = 2; */ public boolean hasOwnerName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string ownerName = 2; */ public java.lang.String getOwnerName() { java.lang.Object ref = ownerName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); ownerName_ = s; return s; } else { return (java.lang.String) ref; } } /** * optional string ownerName = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getOwnerNameBytes() { java.lang.Object ref = ownerName_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); ownerName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * optional string ownerName = 2; */ public Builder setOwnerName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; ownerName_ = value; onChanged(); return this; } /** * optional string ownerName = 2; */ public Builder clearOwnerName() { bitField0_ = (bitField0_ & ~0x00000002); ownerName_ = getDefaultInstance().getOwnerName(); onChanged(); return this; } /** * optional string ownerName = 2; */ public Builder setOwnerNameBytes( 
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; ownerName_ = value; onChanged(); return this; } // optional string groupName = 3; private java.lang.Object groupName_ = ""; /** * optional string groupName = 3; */ public boolean hasGroupName() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional string groupName = 3; */ public java.lang.String getGroupName() { java.lang.Object ref = groupName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); groupName_ = s; return s; } else { return (java.lang.String) ref; } } /** * optional string groupName = 3; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getGroupNameBytes() { java.lang.Object ref = groupName_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); groupName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * optional string groupName = 3; */ public Builder setGroupName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; groupName_ = value; onChanged(); return this; } /** * optional string groupName = 3; */ public Builder clearGroupName() { bitField0_ = (bitField0_ & ~0x00000004); groupName_ = getDefaultInstance().getGroupName(); onChanged(); return this; } /** * optional string groupName = 3; */ public Builder setGroupNameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; groupName_ = value; onChanged(); return this; } // optional int32 mode = 4; private int mode_ ; /** * optional int32 mode = 4; */ public boolean hasMode() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional int32 mode = 4; */ public int getMode() { return mode_; } /** * optional int32 mode = 4; */ public Builder setMode(int value) { bitField0_ |= 0x00000008; mode_ = value; onChanged(); return this; } /** * optional int32 mode = 4; */ public Builder clearMode() { bitField0_ = (bitField0_ & ~0x00000008); mode_ = 0; onChanged(); return this; } // optional int64 limit = 5; private long limit_ ; /** * optional int64 limit = 5; */ public boolean hasLimit() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional int64 limit = 5; */ public long getLimit() { return limit_; } /** * optional int64 limit = 5; */ public Builder setLimit(long value) { bitField0_ |= 0x00000010; limit_ = value; onChanged(); return this; } /** * optional int64 limit = 5; */ public Builder clearLimit() { bitField0_ = (bitField0_ & ~0x00000010); limit_ = 0L; onChanged(); return this; } // optional int64 maxRelativeExpiry = 6; private long maxRelativeExpiry_ ; /** * optional int64 maxRelativeExpiry = 6; */ public boolean hasMaxRelativeExpiry() { return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional int64 maxRelativeExpiry = 6; */ public long getMaxRelativeExpiry() { return maxRelativeExpiry_; } /** * optional int64 maxRelativeExpiry = 6; */ public Builder setMaxRelativeExpiry(long value) { bitField0_ |= 0x00000020; maxRelativeExpiry_ = value; onChanged(); return this; } /** * optional int64 maxRelativeExpiry = 6; */ public Builder clearMaxRelativeExpiry() { 
        bitField0_ = (bitField0_ & ~0x00000020);
        maxRelativeExpiry_ = 0L;
        onChanged();
        return this;
      }

      // optional uint32 defaultReplication = 7 [default = 1];
      private int defaultReplication_ = 1;
      /**
       * optional uint32 defaultReplication = 7 [default = 1];
       */
      public boolean hasDefaultReplication() {
        return ((bitField0_ & 0x00000040) == 0x00000040);
      }
      /**
       * optional uint32 defaultReplication = 7 [default = 1];
       */
      public int getDefaultReplication() {
        return defaultReplication_;
      }
      /**
       * optional uint32 defaultReplication = 7 [default = 1];
       */
      public Builder setDefaultReplication(int value) {
        bitField0_ |= 0x00000040;
        defaultReplication_ = value;
        onChanged();
        return this;
      }
      /**
       * optional uint32 defaultReplication = 7 [default = 1];
       */
      public Builder clearDefaultReplication() {
        bitField0_ = (bitField0_ & ~0x00000040);
        defaultReplication_ = 1;
        onChanged();
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CachePoolInfoProto)
    }

    static {
      defaultInstance = new CachePoolInfoProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.CachePoolInfoProto)
  }

  public interface CachePoolStatsProtoOrBuilder
      extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder {

    // required int64 bytesNeeded = 1;
    /**
     * required int64 bytesNeeded = 1;
     */
    boolean hasBytesNeeded();
    /**
     * required int64 bytesNeeded = 1;
     */
    long getBytesNeeded();

    // required int64 bytesCached = 2;
    /**
     * required int64 bytesCached = 2;
     */
    boolean hasBytesCached();
    /**
     * required int64 bytesCached = 2;
     */
    long getBytesCached();

    // required int64 bytesOverlimit = 3;
    /**
     * required int64 bytesOverlimit = 3;
     */
    boolean hasBytesOverlimit();
    /**
     * required int64 bytesOverlimit = 3;
     */
    long getBytesOverlimit();

    // required int64 filesNeeded = 4;
    /**
     * required int64 filesNeeded = 4;
     */
    boolean hasFilesNeeded();
    /**
     * required int64 filesNeeded = 4;
     */
    long getFilesNeeded();

    // required int64 filesCached = 5;
    /**
     * required int64 filesCached = 5;
     */
    boolean hasFilesCached();
    /**
     * required int64 filesCached = 5;
     */
    long getFilesCached();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.CachePoolStatsProto}
   */
  public static final class CachePoolStatsProto extends
      io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage
      implements CachePoolStatsProtoOrBuilder {
    // Use CachePoolStatsProto.newBuilder() to construct.
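    /*
     * Illustrative sketch, not part of the generated file: all five fields of
     * CachePoolStatsProto are required, so build() throws an
     * UninitializedMessageException unless each one has been set
     * (buildPartial() skips that check). The numbers below are made up.
     *
     *   CachePoolStatsProto stats = CachePoolStatsProto.newBuilder()
     *       .setBytesNeeded(4096L)
     *       .setBytesCached(2048L)
     *       .setBytesOverlimit(0L)
     *       .setFilesNeeded(2L)
     *       .setFilesCached(1L)
     *       .build();
     */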
    private CachePoolStatsProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private CachePoolStatsProto(boolean noInit) {
      this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
    }

    private static final CachePoolStatsProto defaultInstance;
    public static CachePoolStatsProto getDefaultInstance() {
      return defaultInstance;
    }

    public CachePoolStatsProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private CachePoolStatsProto(
        io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
        io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 8: {
              bitField0_ |= 0x00000001;
              bytesNeeded_ = input.readInt64();
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              bytesCached_ = input.readInt64();
              break;
            }
            case 24: {
              bitField0_ |= 0x00000004;
              bytesOverlimit_ = input.readInt64();
              break;
            }
            case 32: {
              bitField0_ |= 0x00000008;
              filesNeeded_ = input.readInt64();
              break;
            }
            case 40: {
              bitField0_ |= 0x00000010;
              filesCached_ = input.readInt64();
              break;
            }
          }
        }
      } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CachePoolStatsProto_descriptor;
    }

    protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CachePoolStatsProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto.Builder.class);
    }

    public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser<CachePoolStatsProto> PARSER =
        new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser<CachePoolStatsProto>() {
      public CachePoolStatsProto parsePartialFrom(
          io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input,
          io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
        return new
CachePoolStatsProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required int64 bytesNeeded = 1; public static final int BYTESNEEDED_FIELD_NUMBER = 1; private long bytesNeeded_; /** * required int64 bytesNeeded = 1; */ public boolean hasBytesNeeded() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 bytesNeeded = 1; */ public long getBytesNeeded() { return bytesNeeded_; } // required int64 bytesCached = 2; public static final int BYTESCACHED_FIELD_NUMBER = 2; private long bytesCached_; /** * required int64 bytesCached = 2; */ public boolean hasBytesCached() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required int64 bytesCached = 2; */ public long getBytesCached() { return bytesCached_; } // required int64 bytesOverlimit = 3; public static final int BYTESOVERLIMIT_FIELD_NUMBER = 3; private long bytesOverlimit_; /** * required int64 bytesOverlimit = 3; */ public boolean hasBytesOverlimit() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required int64 bytesOverlimit = 3; */ public long getBytesOverlimit() { return bytesOverlimit_; } // required int64 filesNeeded = 4; public static final int FILESNEEDED_FIELD_NUMBER = 4; private long filesNeeded_; /** * required int64 filesNeeded = 4; */ public boolean hasFilesNeeded() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required int64 filesNeeded = 4; */ public long getFilesNeeded() { return filesNeeded_; } // required int64 filesCached = 5; public static final int FILESCACHED_FIELD_NUMBER = 5; private long filesCached_; /** * required int64 filesCached = 5; */ public boolean hasFilesCached() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required int64 filesCached = 5; */ public long getFilesCached() { return filesCached_; } private void initFields() { bytesNeeded_ = 0L; bytesCached_ = 0L; bytesOverlimit_ = 0L; filesNeeded_ = 0L; filesCached_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasBytesNeeded()) { memoizedIsInitialized = 0; return false; } if (!hasBytesCached()) { memoizedIsInitialized = 0; return false; } if (!hasBytesOverlimit()) { memoizedIsInitialized = 0; return false; } if (!hasFilesNeeded()) { memoizedIsInitialized = 0; return false; } if (!hasFilesCached()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeInt64(1, bytesNeeded_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeInt64(2, bytesCached_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeInt64(3, bytesOverlimit_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeInt64(4, filesNeeded_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeInt64(5, filesCached_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeInt64Size(1, bytesNeeded_); } if (((bitField0_ & 
0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeInt64Size(2, bytesCached_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeInt64Size(3, bytesOverlimit_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeInt64Size(4, filesNeeded_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeInt64Size(5, filesCached_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto) obj; boolean result = true; result = result && (hasBytesNeeded() == other.hasBytesNeeded()); if (hasBytesNeeded()) { result = result && (getBytesNeeded() == other.getBytesNeeded()); } result = result && (hasBytesCached() == other.hasBytesCached()); if (hasBytesCached()) { result = result && (getBytesCached() == other.getBytesCached()); } result = result && (hasBytesOverlimit() == other.hasBytesOverlimit()); if (hasBytesOverlimit()) { result = result && (getBytesOverlimit() == other.getBytesOverlimit()); } result = result && (hasFilesNeeded() == other.hasFilesNeeded()); if (hasFilesNeeded()) { result = result && (getFilesNeeded() == other.getFilesNeeded()); } result = result && (hasFilesCached() == other.hasFilesCached()); if (hasFilesCached()) { result = result && (getFilesCached() == other.getFilesCached()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasBytesNeeded()) { hash = (37 * hash) + BYTESNEEDED_FIELD_NUMBER; hash = (53 * hash) + hashLong(getBytesNeeded()); } if (hasBytesCached()) { hash = (37 * hash) + BYTESCACHED_FIELD_NUMBER; hash = (53 * hash) + hashLong(getBytesCached()); } if (hasBytesOverlimit()) { hash = (37 * hash) + BYTESOVERLIMIT_FIELD_NUMBER; hash = (53 * hash) + hashLong(getBytesOverlimit()); } if (hasFilesNeeded()) { hash = (37 * hash) + FILESNEEDED_FIELD_NUMBER; hash = (53 * hash) + hashLong(getFilesNeeded()); } if (hasFilesCached()) { hash = (37 * hash) + FILESCACHED_FIELD_NUMBER; hash = (53 * hash) + hashLong(getFilesCached()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.CachePoolStatsProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProtoOrBuilder { public static final 
io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CachePoolStatsProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CachePoolStatsProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); bytesNeeded_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); bytesCached_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); bytesOverlimit_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); filesNeeded_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); filesCached_ = 0L; bitField0_ = (bitField0_ & ~0x00000010); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CachePoolStatsProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.bytesNeeded_ = bytesNeeded_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.bytesCached_ = bytesCached_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.bytesOverlimit_ = bytesOverlimit_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.filesNeeded_ = filesNeeded_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } result.filesCached_ = filesCached_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder 
mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto.getDefaultInstance()) return this; if (other.hasBytesNeeded()) { setBytesNeeded(other.getBytesNeeded()); } if (other.hasBytesCached()) { setBytesCached(other.getBytesCached()); } if (other.hasBytesOverlimit()) { setBytesOverlimit(other.getBytesOverlimit()); } if (other.hasFilesNeeded()) { setFilesNeeded(other.getFilesNeeded()); } if (other.hasFilesCached()) { setFilesCached(other.getFilesCached()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasBytesNeeded()) { return false; } if (!hasBytesCached()) { return false; } if (!hasBytesOverlimit()) { return false; } if (!hasFilesNeeded()) { return false; } if (!hasFilesCached()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required int64 bytesNeeded = 1; private long bytesNeeded_ ; /** * required int64 bytesNeeded = 1; */ public boolean hasBytesNeeded() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 bytesNeeded = 1; */ public long getBytesNeeded() { return bytesNeeded_; } /** * required int64 bytesNeeded = 1; */ public Builder setBytesNeeded(long value) { bitField0_ |= 0x00000001; bytesNeeded_ = value; onChanged(); return this; } /** * required int64 bytesNeeded = 1; */ public Builder clearBytesNeeded() { bitField0_ = (bitField0_ & ~0x00000001); bytesNeeded_ = 0L; onChanged(); return this; } // required int64 bytesCached = 2; private long bytesCached_ ; /** * required int64 bytesCached = 2; */ public boolean hasBytesCached() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required int64 bytesCached = 2; */ public long getBytesCached() { return bytesCached_; } /** * required int64 bytesCached = 2; */ public Builder setBytesCached(long value) { bitField0_ |= 0x00000002; bytesCached_ = value; onChanged(); return this; } /** * required int64 bytesCached = 2; */ public Builder clearBytesCached() { bitField0_ = (bitField0_ & ~0x00000002); bytesCached_ = 0L; onChanged(); return this; } // required int64 bytesOverlimit = 3; private long bytesOverlimit_ ; /** * required int64 bytesOverlimit = 3; */ public boolean hasBytesOverlimit() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required int64 bytesOverlimit = 3; */ public long getBytesOverlimit() { 
return bytesOverlimit_; } /** * required int64 bytesOverlimit = 3; */ public Builder setBytesOverlimit(long value) { bitField0_ |= 0x00000004; bytesOverlimit_ = value; onChanged(); return this; } /** * required int64 bytesOverlimit = 3; */ public Builder clearBytesOverlimit() { bitField0_ = (bitField0_ & ~0x00000004); bytesOverlimit_ = 0L; onChanged(); return this; } // required int64 filesNeeded = 4; private long filesNeeded_ ; /** * required int64 filesNeeded = 4; */ public boolean hasFilesNeeded() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required int64 filesNeeded = 4; */ public long getFilesNeeded() { return filesNeeded_; } /** * required int64 filesNeeded = 4; */ public Builder setFilesNeeded(long value) { bitField0_ |= 0x00000008; filesNeeded_ = value; onChanged(); return this; } /** * required int64 filesNeeded = 4; */ public Builder clearFilesNeeded() { bitField0_ = (bitField0_ & ~0x00000008); filesNeeded_ = 0L; onChanged(); return this; } // required int64 filesCached = 5; private long filesCached_ ; /** * required int64 filesCached = 5; */ public boolean hasFilesCached() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** * required int64 filesCached = 5; */ public long getFilesCached() { return filesCached_; } /** * required int64 filesCached = 5; */ public Builder setFilesCached(long value) { bitField0_ |= 0x00000010; filesCached_ = value; onChanged(); return this; } /** * required int64 filesCached = 5; */ public Builder clearFilesCached() { bitField0_ = (bitField0_ & ~0x00000010); filesCached_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CachePoolStatsProto) } static { defaultInstance = new CachePoolStatsProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.CachePoolStatsProto) } public interface AddCachePoolRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.CachePoolInfoProto info = 1; /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ boolean hasInfo(); /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto getInfo(); /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProtoOrBuilder getInfoOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.AddCachePoolRequestProto} */ public static final class AddCachePoolRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements AddCachePoolRequestProtoOrBuilder { // Use AddCachePoolRequestProto.newBuilder() to construct. 
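  // Illustrative usage sketch (comment only, not part of the generated code):
  // building and round-tripping the CachePoolStatsProto message completed above.
  // All five int64 fields are declared required, so build() throws an
  // UninitializedMessageException unless every setter has been called; the
  // sample values below are placeholders.
  //
  //   ClientNamenodeProtocolProtos.CachePoolStatsProto stats =
  //       ClientNamenodeProtocolProtos.CachePoolStatsProto.newBuilder()
  //           .setBytesNeeded(1024L)
  //           .setBytesCached(512L)
  //           .setBytesOverlimit(0L)
  //           .setFilesNeeded(4L)
  //           .setFilesCached(2L)
  //           .build();
  //   byte[] wire = stats.toByteArray();
  //   ClientNamenodeProtocolProtos.CachePoolStatsProto parsed =
  //       ClientNamenodeProtocolProtos.CachePoolStatsProto.parseFrom(wire);
  //   assert parsed.equals(stats); // equals() compares every present field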
private AddCachePoolRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private AddCachePoolRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final AddCachePoolRequestProto defaultInstance; public static AddCachePoolRequestProto getDefaultInstance() { return defaultInstance; } public AddCachePoolRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private AddCachePoolRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = info_.toBuilder(); } info_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(info_); info_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddCachePoolRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddCachePoolRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public AddCachePoolRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new AddCachePoolRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.CachePoolInfoProto info = 1; public static final int INFO_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto info_; /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public boolean hasInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto getInfo() { return info_; } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProtoOrBuilder getInfoOrBuilder() { return info_; } private void initFields() { info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasInfo()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, info_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, info_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto) obj; boolean result = true; result = result && (hasInfo() == other.hasInfo()); if (hasInfo()) { result = result && getInfo() .equals(other.getInfo()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasInfo()) { hash = (37 * hash) + INFO_FIELD_NUMBER; hash = (53 * hash) + getInfo().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto parseFrom( 
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.AddCachePoolRequestProto} 
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddCachePoolRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddCachePoolRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getInfoFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (infoBuilder_ == null) { info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.getDefaultInstance(); } else { infoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddCachePoolRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (infoBuilder_ == null) { result.info_ = info_; } else { result.info_ = infoBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto.getDefaultInstance()) return this; if (other.hasInfo()) { mergeInfo(other.getInfo()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasInfo()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.CachePoolInfoProto info = 1; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProtoOrBuilder> infoBuilder_; /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public boolean hasInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto getInfo() { if (infoBuilder_ == null) { return info_; } else { return infoBuilder_.getMessage(); } } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public Builder setInfo(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto value) { if (infoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } info_ = value; onChanged(); } else { infoBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public Builder setInfo( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.Builder builderForValue) { if (infoBuilder_ == null) { info_ = builderForValue.build(); onChanged(); } else { infoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public Builder mergeInfo(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto value) { if (infoBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && info_ != 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.getDefaultInstance()) { info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.newBuilder(info_).mergeFrom(value).buildPartial(); } else { info_ = value; } onChanged(); } else { infoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public Builder clearInfo() { if (infoBuilder_ == null) { info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.getDefaultInstance(); onChanged(); } else { infoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.Builder getInfoBuilder() { bitField0_ |= 0x00000001; onChanged(); return getInfoFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProtoOrBuilder getInfoOrBuilder() { if (infoBuilder_ != null) { return infoBuilder_.getMessageOrBuilder(); } else { return info_; } } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProtoOrBuilder> getInfoFieldBuilder() { if (infoBuilder_ == null) { infoBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProtoOrBuilder>( info_, getParentForChildren(), isClean()); info_ = null; } return infoBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AddCachePoolRequestProto) } static { defaultInstance = new AddCachePoolRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.AddCachePoolRequestProto) } public interface AddCachePoolResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.AddCachePoolResponseProto} * *
   * void response
   * 
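   * (The generated message below is intentionally empty: a successful
   * addCachePool call is signaled by this response being returned at all,
   * not by any field inside it.)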
*/ public static final class AddCachePoolResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements AddCachePoolResponseProtoOrBuilder { // Use AddCachePoolResponseProto.newBuilder() to construct. private AddCachePoolResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private AddCachePoolResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final AddCachePoolResponseProto defaultInstance; public static AddCachePoolResponseProto getDefaultInstance() { return defaultInstance; } public AddCachePoolResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private AddCachePoolResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddCachePoolResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddCachePoolResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public AddCachePoolResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new AddCachePoolResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser 
getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.AddCachePoolResponseProto} * *
     * void response
     * 
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddCachePoolResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddCachePoolResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AddCachePoolResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto.getDefaultInstance()) return this; 
this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AddCachePoolResponseProto) } static { defaultInstance = new AddCachePoolResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.AddCachePoolResponseProto) } public interface ModifyCachePoolRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.CachePoolInfoProto info = 1; /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ boolean hasInfo(); /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto getInfo(); /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProtoOrBuilder getInfoOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.ModifyCachePoolRequestProto} */ public static final class ModifyCachePoolRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements ModifyCachePoolRequestProtoOrBuilder { // Use ModifyCachePoolRequestProto.newBuilder() to construct. 
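  // Illustrative usage sketch (comment only, not part of the generated code):
  // AddCachePoolRequestProto above and this ModifyCachePoolRequestProto both
  // wrap a single required CachePoolInfoProto field. The setPoolName(...)
  // setter is assumed from CachePoolInfoProto, which is defined elsewhere in
  // this file; "pool1" is a placeholder value.
  //
  //   ClientNamenodeProtocolProtos.CachePoolInfoProto info =
  //       ClientNamenodeProtocolProtos.CachePoolInfoProto.newBuilder()
  //           .setPoolName("pool1")
  //           .build();
  //   ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto request =
  //       ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto.newBuilder()
  //           .setInfo(info) // required: isInitialized() returns false without it
  //           .build();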
private ModifyCachePoolRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ModifyCachePoolRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ModifyCachePoolRequestProto defaultInstance; public static ModifyCachePoolRequestProto getDefaultInstance() { return defaultInstance; } public ModifyCachePoolRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ModifyCachePoolRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = info_.toBuilder(); } info_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(info_); info_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ModifyCachePoolRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ModifyCachePoolRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public ModifyCachePoolRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new ModifyCachePoolRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.CachePoolInfoProto info = 1; public static final int INFO_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto info_; /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public boolean hasInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto getInfo() { return info_; } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProtoOrBuilder getInfoOrBuilder() { return info_; } private void initFields() { info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasInfo()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, info_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, info_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto) obj; boolean result = true; result = result && (hasInfo() == other.hasInfo()); if (hasInfo()) { result = result && getInfo() .equals(other.getInfo()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasInfo()) { hash = (37 * hash) + INFO_FIELD_NUMBER; hash = (53 * hash) + getInfo().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( 
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ModifyCachePoolRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ModifyCachePoolRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ModifyCachePoolRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getInfoFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (infoBuilder_ == null) { info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.getDefaultInstance(); } else { infoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ModifyCachePoolRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (infoBuilder_ == null) { result.info_ = info_; } else { result.info_ = 
infoBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto.getDefaultInstance()) return this; if (other.hasInfo()) { mergeInfo(other.getInfo()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasInfo()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.CachePoolInfoProto info = 1; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProtoOrBuilder> infoBuilder_; /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public boolean hasInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto getInfo() { if (infoBuilder_ == null) { return info_; } else { return infoBuilder_.getMessage(); } } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public Builder setInfo(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto value) { if (infoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } info_ = value; onChanged(); } else { infoBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public Builder setInfo( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.Builder builderForValue) { if (infoBuilder_ == null) { info_ = builderForValue.build(); onChanged(); } else { infoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public 
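One behavior worth calling out in the mergeInfo accessor that this declaration continues into: if info is already set, the incoming message is merged field by field rather than replacing the stored value. A fragment-level sketch (same imports as the round-trip example above; setLimit is assumed among CachePoolInfoProto's optional setters):

ModifyCachePoolRequestProto.Builder builder = ModifyCachePoolRequestProto.newBuilder()
    .setInfo(CachePoolInfoProto.newBuilder().setPoolName("reports").build());
// Field-by-field merge: poolName is kept, limit is added.
builder.mergeInfo(CachePoolInfoProto.newBuilder().setLimit(1024L).build());
CachePoolInfoProto combined = builder.build().getInfo();
// combined.getPoolName() -> "reports"; combined.getLimit() -> 1024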
Builder mergeInfo(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto value) { if (infoBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && info_ != org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.getDefaultInstance()) { info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.newBuilder(info_).mergeFrom(value).buildPartial(); } else { info_ = value; } onChanged(); } else { infoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public Builder clearInfo() { if (infoBuilder_ == null) { info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.getDefaultInstance(); onChanged(); } else { infoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.Builder getInfoBuilder() { bitField0_ |= 0x00000001; onChanged(); return getInfoFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProtoOrBuilder getInfoOrBuilder() { if (infoBuilder_ != null) { return infoBuilder_.getMessageOrBuilder(); } else { return info_; } } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProtoOrBuilder> getInfoFieldBuilder() { if (infoBuilder_ == null) { infoBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProtoOrBuilder>( info_, getParentForChildren(), isClean()); info_ = null; } return infoBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ModifyCachePoolRequestProto) } static { defaultInstance = new ModifyCachePoolRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ModifyCachePoolRequestProto) } public interface ModifyCachePoolResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.ModifyCachePoolResponseProto} * *
   * <pre>
   * void response
   * </pre>
*/ public static final class ModifyCachePoolResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements ModifyCachePoolResponseProtoOrBuilder { // Use ModifyCachePoolResponseProto.newBuilder() to construct. private ModifyCachePoolResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ModifyCachePoolResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ModifyCachePoolResponseProto defaultInstance; public static ModifyCachePoolResponseProto getDefaultInstance() { return defaultInstance; } public ModifyCachePoolResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ModifyCachePoolResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ModifyCachePoolResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ModifyCachePoolResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public ModifyCachePoolResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new ModifyCachePoolResponseProto(input, extensionRegistry); } }; @java.lang.Override public 
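Because this "void response" message declares no fields, its wire form is empty: the parsing constructor above only ever sees tag 0 or unknown fields. A short sketch of that property (fragment, same setup as the earlier examples):

ModifyCachePoolResponseProto response = ModifyCachePoolResponseProto.getDefaultInstance();
int size = response.getSerializedSize();       // 0: no fields, no unknown fields
ModifyCachePoolResponseProto reparsed =
    ModifyCachePoolResponseProto.parseFrom(new byte[0]);  // zero bytes parse cleanly
boolean ok = reparsed.isInitialized();         // true: nothing is required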
io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto parseFrom( java.io.InputStream input, 
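The parseDelimitedFrom overloads listed just below read a varint length prefix before the message body, so several messages can share one InputStream; writeDelimitedTo, inherited from the protobuf runtime, is the matching writer. A fragment-level sketch:

java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
ModifyCachePoolResponseProto response = ModifyCachePoolResponseProto.getDefaultInstance();
response.writeDelimitedTo(out);                // frame 1: varint length, then body
response.writeDelimitedTo(out);                // frame 2 on the same stream
java.io.ByteArrayInputStream in = new java.io.ByteArrayInputStream(out.toByteArray());
ModifyCachePoolResponseProto first  = ModifyCachePoolResponseProto.parseDelimitedFrom(in);
ModifyCachePoolResponseProto second = ModifyCachePoolResponseProto.parseDelimitedFrom(in);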
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ModifyCachePoolResponseProto} * *
     * <pre>
     * void response
     * </pre>
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ModifyCachePoolResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ModifyCachePoolResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ModifyCachePoolResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto.getDefaultInstance()) 
return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ModifyCachePoolResponseProto) } static { defaultInstance = new ModifyCachePoolResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ModifyCachePoolResponseProto) } public interface RemoveCachePoolRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string poolName = 1; /** * required string poolName = 1; */ boolean hasPoolName(); /** * required string poolName = 1; */ java.lang.String getPoolName(); /** * required string poolName = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPoolNameBytes(); } /** * Protobuf type {@code hadoop.hdfs.RemoveCachePoolRequestProto} */ public static final class RemoveCachePoolRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements RemoveCachePoolRequestProtoOrBuilder { // Use RemoveCachePoolRequestProto.newBuilder() to construct. 
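poolName is this request's only field and it is required, so build() rejects an uninitialized builder while buildPartial() does not. A sketch of that contract; the pool name is hypothetical:

RemoveCachePoolRequestProto ok = RemoveCachePoolRequestProto.newBuilder()
    .setPoolName("reports")                     // satisfies the required field
    .build();

RemoveCachePoolRequestProto.Builder empty = RemoveCachePoolRequestProto.newBuilder();
// empty.isInitialized() is false; empty.build() would throw
// UninitializedMessageException. buildPartial() skips the check:
RemoveCachePoolRequestProto partial = empty.buildPartial();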
private RemoveCachePoolRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RemoveCachePoolRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RemoveCachePoolRequestProto defaultInstance; public static RemoveCachePoolRequestProto getDefaultInstance() { return defaultInstance; } public RemoveCachePoolRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RemoveCachePoolRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; poolName_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RemoveCachePoolRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RemoveCachePoolRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public RemoveCachePoolRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new RemoveCachePoolRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string poolName = 1; public static final 
int POOLNAME_FIELD_NUMBER = 1; private java.lang.Object poolName_; /** * required string poolName = 1; */ public boolean hasPoolName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string poolName = 1; */ public java.lang.String getPoolName() { java.lang.Object ref = poolName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { poolName_ = s; } return s; } } /** * required string poolName = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPoolNameBytes() { java.lang.Object ref = poolName_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); poolName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { poolName_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasPoolName()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getPoolNameBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getPoolNameBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto) obj; boolean result = true; result = result && (hasPoolName() == other.hasPoolName()); if (hasPoolName()) { result = result && getPoolName() .equals(other.getPoolName()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasPoolName()) { hash = (37 * hash) + POOLNAME_FIELD_NUMBER; hash = (53 * hash) + getPoolName().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto parseFrom( 
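As getPoolName() above shows, the field is held as a (shaded) ByteString after parsing and is decoded to a java.lang.String, then cached, on first access; getPoolNameBytes() skips the UTF-8 decode entirely. A fragment-level sketch:

RemoveCachePoolRequestProto parsed = RemoveCachePoolRequestProto.parseFrom(
    RemoveCachePoolRequestProto.newBuilder().setPoolName("reports").build().toByteArray());
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString raw =
    parsed.getPoolNameBytes();                  // raw bytes, no UTF-8 decode
java.lang.String name = parsed.getPoolName();   // decodes once, caches the String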
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code 
hadoop.hdfs.RemoveCachePoolRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RemoveCachePoolRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RemoveCachePoolRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); poolName_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RemoveCachePoolRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.poolName_ = poolName_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto)other); } else { 
super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto.getDefaultInstance()) return this; if (other.hasPoolName()) { bitField0_ |= 0x00000001; poolName_ = other.poolName_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasPoolName()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string poolName = 1; private java.lang.Object poolName_ = ""; /** * required string poolName = 1; */ public boolean hasPoolName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string poolName = 1; */ public java.lang.String getPoolName() { java.lang.Object ref = poolName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); poolName_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string poolName = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPoolNameBytes() { java.lang.Object ref = poolName_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); poolName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string poolName = 1; */ public Builder setPoolName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; poolName_ = value; onChanged(); return this; } /** * required string poolName = 1; */ public Builder clearPoolName() { bitField0_ = (bitField0_ & ~0x00000001); poolName_ = getDefaultInstance().getPoolName(); onChanged(); return this; } /** * required string poolName = 1; */ public Builder setPoolNameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; poolName_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RemoveCachePoolRequestProto) } static { defaultInstance = new RemoveCachePoolRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RemoveCachePoolRequestProto) } public interface RemoveCachePoolResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.RemoveCachePoolResponseProto} * *
   * <pre>
   * void response
   * </pre>
*/ public static final class RemoveCachePoolResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements RemoveCachePoolResponseProtoOrBuilder { // Use RemoveCachePoolResponseProto.newBuilder() to construct. private RemoveCachePoolResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RemoveCachePoolResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RemoveCachePoolResponseProto defaultInstance; public static RemoveCachePoolResponseProto getDefaultInstance() { return defaultInstance; } public RemoveCachePoolResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RemoveCachePoolResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RemoveCachePoolResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RemoveCachePoolResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public RemoveCachePoolResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new RemoveCachePoolResponseProto(input, extensionRegistry); } }; @java.lang.Override public 
io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto parseFrom( java.io.InputStream input, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RemoveCachePoolResponseProto} * *
     * <pre>
     * void response
     * </pre>
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RemoveCachePoolResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RemoveCachePoolResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RemoveCachePoolResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto.getDefaultInstance()) 
return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RemoveCachePoolResponseProto) } static { defaultInstance = new RemoveCachePoolResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RemoveCachePoolResponseProto) } public interface ListCachePoolsRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string prevPoolName = 1; /** * required string prevPoolName = 1; */ boolean hasPrevPoolName(); /** * required string prevPoolName = 1; */ java.lang.String getPrevPoolName(); /** * required string prevPoolName = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPrevPoolNameBytes(); } /** * Protobuf type {@code hadoop.hdfs.ListCachePoolsRequestProto} */ public static final class ListCachePoolsRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements ListCachePoolsRequestProtoOrBuilder { // Use ListCachePoolsRequestProto.newBuilder() to construct. 
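prevPoolName acts as a paging cursor: a caller conventionally starts from the empty string and, for each later page, resends the last pool name the previous response returned (this usage pattern is inferred from the HDFS client, not from this file). A hypothetical sketch:

// First page: no previous pool yet.
ListCachePoolsRequestProto firstPage = ListCachePoolsRequestProto.newBuilder()
    .setPrevPoolName("")
    .build();
// Later page: resend the last pool name received (hypothetical value).
ListCachePoolsRequestProto nextPage = ListCachePoolsRequestProto.newBuilder()
    .setPrevPoolName("reports")
    .build();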
private ListCachePoolsRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ListCachePoolsRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ListCachePoolsRequestProto defaultInstance; public static ListCachePoolsRequestProto getDefaultInstance() { return defaultInstance; } public ListCachePoolsRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ListCachePoolsRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; prevPoolName_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCachePoolsRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCachePoolsRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public ListCachePoolsRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new ListCachePoolsRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string prevPoolName = 1; public static final int 
PREVPOOLNAME_FIELD_NUMBER = 1; private java.lang.Object prevPoolName_; /** * required string prevPoolName = 1; */ public boolean hasPrevPoolName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string prevPoolName = 1; */ public java.lang.String getPrevPoolName() { java.lang.Object ref = prevPoolName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { prevPoolName_ = s; } return s; } } /** * required string prevPoolName = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPrevPoolNameBytes() { java.lang.Object ref = prevPoolName_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); prevPoolName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { prevPoolName_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasPrevPoolName()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getPrevPoolNameBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getPrevPoolNameBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto) obj; boolean result = true; result = result && (hasPrevPoolName() == other.hasPrevPoolName()); if (hasPrevPoolName()) { result = result && getPrevPoolName() .equals(other.getPrevPoolName()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasPrevPoolName()) { hash = (37 * hash) + PREVPOOLNAME_FIELD_NUMBER; hash = (53 * hash) + getPrevPoolName().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) 
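/* [Illustrative note, not protoc output] All of the parseFrom overloads above
   delegate to PARSER. parseFrom(InputStream) reads the stream to EOF as one
   message, while parseDelimitedFrom() first reads a varint length prefix, so
   several messages can share one stream. A minimal sketch (req1/req2 are
   hypothetical, already-built requests):

     ByteArrayOutputStream out = new ByteArrayOutputStream();
     req1.writeDelimitedTo(out);
     req2.writeDelimitedTo(out);
     InputStream in = new ByteArrayInputStream(out.toByteArray());
     ListCachePoolsRequestProto a = ListCachePoolsRequestProto.parseDelimitedFrom(in);
     ListCachePoolsRequestProto b = ListCachePoolsRequestProto.parseDelimitedFrom(in);
*/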
{ Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ListCachePoolsRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCachePoolsRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCachePoolsRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); prevPoolName_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCachePoolsRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.prevPoolName_ = prevPoolName_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto) { return 
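/* [Illustrative note, not protoc output] build() above delegates to
   buildPartial() and then enforces required-field semantics: if
   isInitialized() is false (prevPoolName unset) it throws via
   newUninitializedMessageException(). buildPartial() skips that check, which
   is why the parser uses it so a partially read message can still be attached
   to an InvalidProtocolBufferException:

     ListCachePoolsRequestProto.newBuilder().build();         // throws, field unset
     ListCachePoolsRequestProto.newBuilder().buildPartial();  // returns uninitialized msg
*/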
mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto.getDefaultInstance()) return this; if (other.hasPrevPoolName()) { bitField0_ |= 0x00000001; prevPoolName_ = other.prevPoolName_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasPrevPoolName()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string prevPoolName = 1; private java.lang.Object prevPoolName_ = ""; /** * required string prevPoolName = 1; */ public boolean hasPrevPoolName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string prevPoolName = 1; */ public java.lang.String getPrevPoolName() { java.lang.Object ref = prevPoolName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); prevPoolName_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string prevPoolName = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPrevPoolNameBytes() { java.lang.Object ref = prevPoolName_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); prevPoolName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string prevPoolName = 1; */ public Builder setPrevPoolName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; prevPoolName_ = value; onChanged(); return this; } /** * required string prevPoolName = 1; */ public Builder clearPrevPoolName() { bitField0_ = (bitField0_ & ~0x00000001); prevPoolName_ = getDefaultInstance().getPrevPoolName(); onChanged(); return this; } /** * required string prevPoolName = 1; */ public Builder setPrevPoolNameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; prevPoolName_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ListCachePoolsRequestProto) } static { defaultInstance = new ListCachePoolsRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ListCachePoolsRequestProto) } public interface ListCachePoolsResponseProtoOrBuilder 
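/* [Illustrative note, not protoc output] Taken together, the message class and
   Builder above give the usual generated round trip ("pool-1" is a
   hypothetical pool name):

     ListCachePoolsRequestProto req = ListCachePoolsRequestProto.newBuilder()
         .setPrevPoolName("pool-1")
         .build();
     byte[] bytes = req.toByteArray();
     ListCachePoolsRequestProto copy = ListCachePoolsRequestProto.parseFrom(bytes);
     assert copy.getPrevPoolName().equals("pool-1");
*/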
extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ java.util.List getEntriesList(); /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto getEntries(int index); /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ int getEntriesCount(); /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ java.util.List getEntriesOrBuilderList(); /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProtoOrBuilder getEntriesOrBuilder( int index); // required bool hasMore = 2; /** * required bool hasMore = 2; */ boolean hasHasMore(); /** * required bool hasMore = 2; */ boolean getHasMore(); } /** * Protobuf type {@code hadoop.hdfs.ListCachePoolsResponseProto} */ public static final class ListCachePoolsResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements ListCachePoolsResponseProtoOrBuilder { // Use ListCachePoolsResponseProto.newBuilder() to construct. private ListCachePoolsResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ListCachePoolsResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ListCachePoolsResponseProto defaultInstance; public static ListCachePoolsResponseProto getDefaultInstance() { return defaultInstance; } public ListCachePoolsResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ListCachePoolsResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { entries_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } entries_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto.PARSER, extensionRegistry)); break; } case 16: { bitField0_ |= 0x00000001; hasMore_ = input.readBool(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) 
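/* [Illustrative note, not protoc output] The parsing constructor above
   accumulates repeated `entries` records (field 1, tag 10) into a mutable
   ArrayList and freezes it with Collections.unmodifiableList in the finally
   block. In protoc's output these declarations carry type arguments --
   List<CachePoolEntryProto>, List<? extends CachePoolEntryProtoOrBuilder>,
   new ArrayList<CachePoolEntryProto>() -- which this listing does not show. */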
== 0x00000001)) { entries_ = java.util.Collections.unmodifiableList(entries_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCachePoolsResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCachePoolsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public ListCachePoolsResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new ListCachePoolsResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; public static final int ENTRIES_FIELD_NUMBER = 1; private java.util.List entries_; /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ public java.util.List getEntriesList() { return entries_; } /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ public java.util.List getEntriesOrBuilderList() { return entries_; } /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ public int getEntriesCount() { return entries_.size(); } /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto getEntries(int index) { return entries_.get(index); } /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProtoOrBuilder getEntriesOrBuilder( int index) { return entries_.get(index); } // required bool hasMore = 2; public static final int HASMORE_FIELD_NUMBER = 2; private boolean hasMore_; /** * required bool hasMore = 2; */ public boolean hasHasMore() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool hasMore = 2; */ public boolean getHasMore() { return hasMore_; } private void initFields() { entries_ = java.util.Collections.emptyList(); hasMore_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasHasMore()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getEntriesCount(); i++) { if (!getEntries(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); for (int i = 0; 
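/* [Illustrative note, not protoc output] Because `hasMore` is a required bool,
   the accessors double up awkwardly: hasHasMore() tests the presence bit in
   bitField0_, while getHasMore() returns the value itself. Presence is tracked
   separately because a bool's default (false) is indistinguishable from an
   unset field without the has-bit. */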
i < entries_.size(); i++) { output.writeMessage(1, entries_.get(i)); } if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBool(2, hasMore_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; for (int i = 0; i < entries_.size(); i++) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, entries_.get(i)); } if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(2, hasMore_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto) obj; boolean result = true; result = result && getEntriesList() .equals(other.getEntriesList()); result = result && (hasHasMore() == other.hasHasMore()); if (hasHasMore()) { result = result && (getHasMore() == other.getHasMore()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (getEntriesCount() > 0) { hash = (37 * hash) + ENTRIES_FIELD_NUMBER; hash = (53 * hash) + getEntriesList().hashCode(); } if (hasHasMore()) { hash = (37 * hash) + HASMORE_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getHasMore()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return 
PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ListCachePoolsResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCachePoolsResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCachePoolsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto.newBuilder() 
private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getEntriesFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (entriesBuilder_ == null) { entries_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { entriesBuilder_.clear(); } hasMore_ = false; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListCachePoolsResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (entriesBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { entries_ = java.util.Collections.unmodifiableList(entries_); bitField0_ = (bitField0_ & ~0x00000001); } result.entries_ = entries_; } else { result.entries_ = entriesBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000001; } result.hasMore_ = hasMore_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto.getDefaultInstance()) return this; if (entriesBuilder_ == null) { if (!other.entries_.isEmpty()) { if (entries_.isEmpty()) { entries_ = other.entries_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureEntriesIsMutable(); entries_.addAll(other.entries_); } onChanged(); } } else { if (!other.entries_.isEmpty()) { if (entriesBuilder_.isEmpty()) { entriesBuilder_.dispose(); entriesBuilder_ = null; entries_ = other.entries_; bitField0_ = (bitField0_ & ~0x00000001); 
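/* [Illustrative note, not protoc output] In buildPartial() above the builder
   and the message use different bit layouts: the builder tracks `entries`
   under 0x00000001 and `hasMore` under 0x00000002, but the message needs a
   has-bit only for hasMore, so the builder's 0x00000002 is remapped to the
   message's 0x00000001. Repeated fields get no has-bit; presence is simply a
   non-empty list. */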
entriesBuilder_ = io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getEntriesFieldBuilder() : null; } else { entriesBuilder_.addAllMessages(other.entries_); } } } if (other.hasHasMore()) { setHasMore(other.getHasMore()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasHasMore()) { return false; } for (int i = 0; i < getEntriesCount(); i++) { if (!getEntries(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; private java.util.List entries_ = java.util.Collections.emptyList(); private void ensureEntriesIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { entries_ = new java.util.ArrayList(entries_); bitField0_ |= 0x00000001; } } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProtoOrBuilder> entriesBuilder_; /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ public java.util.List getEntriesList() { if (entriesBuilder_ == null) { return java.util.Collections.unmodifiableList(entries_); } else { return entriesBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ public int getEntriesCount() { if (entriesBuilder_ == null) { return entries_.size(); } else { return entriesBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto getEntries(int index) { if (entriesBuilder_ == null) { return entries_.get(index); } else { return entriesBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ public Builder setEntries( int index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto value) { if (entriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureEntriesIsMutable(); entries_.set(index, value); onChanged(); } else { entriesBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ public Builder setEntries( int index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto.Builder builderForValue) { if (entriesBuilder_ == null) { ensureEntriesIsMutable(); entries_.set(index, builderForValue.build()); onChanged(); } else { entriesBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated 
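/* [Illustrative note, not protoc output] The Builder keeps `entries` in one of
   two storage modes: a plain list (entries_) with copy-on-first-write via
   ensureEntriesIsMutable(), or a RepeatedFieldBuilder (entriesBuilder_) once
   nested builders are requested. Every accessor branches on the active mode,
   and mergeFrom() above even disposes an empty RepeatedFieldBuilder so the
   builder can alias the other message's immutable list without copying. */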
.hadoop.hdfs.CachePoolEntryProto entries = 1; */ public Builder addEntries(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto value) { if (entriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureEntriesIsMutable(); entries_.add(value); onChanged(); } else { entriesBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ public Builder addEntries( int index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto value) { if (entriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureEntriesIsMutable(); entries_.add(index, value); onChanged(); } else { entriesBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ public Builder addEntries( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto.Builder builderForValue) { if (entriesBuilder_ == null) { ensureEntriesIsMutable(); entries_.add(builderForValue.build()); onChanged(); } else { entriesBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ public Builder addEntries( int index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto.Builder builderForValue) { if (entriesBuilder_ == null) { ensureEntriesIsMutable(); entries_.add(index, builderForValue.build()); onChanged(); } else { entriesBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ public Builder addAllEntries( java.lang.Iterable values) { if (entriesBuilder_ == null) { ensureEntriesIsMutable(); super.addAll(values, entries_); onChanged(); } else { entriesBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ public Builder clearEntries() { if (entriesBuilder_ == null) { entries_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { entriesBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ public Builder removeEntries(int index) { if (entriesBuilder_ == null) { ensureEntriesIsMutable(); entries_.remove(index); onChanged(); } else { entriesBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto.Builder getEntriesBuilder( int index) { return getEntriesFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProtoOrBuilder getEntriesOrBuilder( int index) { if (entriesBuilder_ == null) { return entries_.get(index); } else { return entriesBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ public java.util.List getEntriesOrBuilderList() { if (entriesBuilder_ != null) { return entriesBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(entries_); } } /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto.Builder addEntriesBuilder() { return getEntriesFieldBuilder().addBuilder( 
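/* [Illustrative note, not protoc output] A minimal sketch of assembling a
   response with the repeated-field methods above (`info` and `stats` are
   hypothetical CachePoolInfoProto / CachePoolStatsProto messages built
   elsewhere):

     ListCachePoolsResponseProto resp = ListCachePoolsResponseProto.newBuilder()
         .addEntries(CachePoolEntryProto.newBuilder()
             .setInfo(info)
             .setStats(stats))
         .setHasMore(false)
         .build();
*/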
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto.Builder addEntriesBuilder( int index) { return getEntriesFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.CachePoolEntryProto entries = 1; */ public java.util.List getEntriesBuilderList() { return getEntriesFieldBuilder().getBuilderList(); } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProtoOrBuilder> getEntriesFieldBuilder() { if (entriesBuilder_ == null) { entriesBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProtoOrBuilder>( entries_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); entries_ = null; } return entriesBuilder_; } // required bool hasMore = 2; private boolean hasMore_ ; /** * required bool hasMore = 2; */ public boolean hasHasMore() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required bool hasMore = 2; */ public boolean getHasMore() { return hasMore_; } /** * required bool hasMore = 2; */ public Builder setHasMore(boolean value) { bitField0_ |= 0x00000002; hasMore_ = value; onChanged(); return this; } /** * required bool hasMore = 2; */ public Builder clearHasMore() { bitField0_ = (bitField0_ & ~0x00000002); hasMore_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ListCachePoolsResponseProto) } static { defaultInstance = new ListCachePoolsResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ListCachePoolsResponseProto) } public interface CachePoolEntryProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.CachePoolInfoProto info = 1; /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ boolean hasInfo(); /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto getInfo(); /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProtoOrBuilder getInfoOrBuilder(); // required .hadoop.hdfs.CachePoolStatsProto stats = 2; /** * required .hadoop.hdfs.CachePoolStatsProto stats = 2; */ boolean hasStats(); /** * required .hadoop.hdfs.CachePoolStatsProto stats = 2; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto getStats(); /** * required .hadoop.hdfs.CachePoolStatsProto stats = 2; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProtoOrBuilder getStatsOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.CachePoolEntryProto} */ public static final class 
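/* [Illustrative note, not protoc output] prevPoolName and hasMore together
   implement cursor-style pagination for the listCachePools RPC: the client
   sends the last pool name it has seen (empty on the first call) and loops
   while hasMore is true. A sketch against a hypothetical `rpc` stub, assuming
   CachePoolInfoProto exposes getPoolName():

     String prev = "";
     boolean more = true;
     while (more) {
       ListCachePoolsResponseProto resp = rpc.listCachePools(
           ListCachePoolsRequestProto.newBuilder().setPrevPoolName(prev).build());
       for (CachePoolEntryProto e : resp.getEntriesList()) {
         prev = e.getInfo().getPoolName();
         // consume e.getStats() ...
       }
       more = resp.getHasMore();
     }
*/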
CachePoolEntryProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements CachePoolEntryProtoOrBuilder { // Use CachePoolEntryProto.newBuilder() to construct. private CachePoolEntryProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private CachePoolEntryProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final CachePoolEntryProto defaultInstance; public static CachePoolEntryProto getDefaultInstance() { return defaultInstance; } public CachePoolEntryProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CachePoolEntryProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = info_.toBuilder(); } info_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(info_); info_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 18: { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = stats_.toBuilder(); } stats_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(stats_); stats_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000002; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CachePoolEntryProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return 
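/* [Illustrative note, not protoc output] The case 10 / case 18 branches above
   implement protobuf's merge rule for singular message fields that appear more
   than once on the wire: if the has-bit is already set, the existing
   info_/stats_ is turned back into a builder, the newly read message is merged
   into it, and buildPartial() yields the combined value, rather than the last
   occurrence simply replacing the first. */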
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CachePoolEntryProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public CachePoolEntryProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new CachePoolEntryProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.CachePoolInfoProto info = 1; public static final int INFO_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto info_; /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public boolean hasInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto getInfo() { return info_; } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProtoOrBuilder getInfoOrBuilder() { return info_; } // required .hadoop.hdfs.CachePoolStatsProto stats = 2; public static final int STATS_FIELD_NUMBER = 2; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto stats_; /** * required .hadoop.hdfs.CachePoolStatsProto stats = 2; */ public boolean hasStats() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.CachePoolStatsProto stats = 2; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto getStats() { return stats_; } /** * required .hadoop.hdfs.CachePoolStatsProto stats = 2; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProtoOrBuilder getStatsOrBuilder() { return stats_; } private void initFields() { info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.getDefaultInstance(); stats_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasInfo()) { memoizedIsInitialized = 0; return false; } if (!hasStats()) { memoizedIsInitialized = 0; return false; } if (!getStats().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, info_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, stats_); } getUnknownFields().writeTo(output); } private int 
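/* [Illustrative note, not protoc output] isInitialized() above verifies
   getStats().isInitialized() recursively but checks only presence for info.
   protoc emits the recursive check only when the nested type itself declares
   required fields, which suggests CachePoolStatsProto has required fields
   while CachePoolInfoProto's are all optional. */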
memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, info_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(2, stats_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto) obj; boolean result = true; result = result && (hasInfo() == other.hasInfo()); if (hasInfo()) { result = result && getInfo() .equals(other.getInfo()); } result = result && (hasStats() == other.hasStats()); if (hasStats()) { result = result && getStats() .equals(other.getStats()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasInfo()) { hash = (37 * hash) + INFO_FIELD_NUMBER; hash = (53 * hash) + getInfo().hashCode(); } if (hasStats()) { hash = (37 * hash) + STATS_FIELD_NUMBER; hash = (53 * hash) + getStats().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } 
public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.CachePoolEntryProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CachePoolEntryProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CachePoolEntryProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { 
getInfoFieldBuilder(); getStatsFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (infoBuilder_ == null) { info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.getDefaultInstance(); } else { infoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); if (statsBuilder_ == null) { stats_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto.getDefaultInstance(); } else { statsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CachePoolEntryProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (infoBuilder_ == null) { result.info_ = info_; } else { result.info_ = infoBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } if (statsBuilder_ == null) { result.stats_ = stats_; } else { result.stats_ = statsBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto.getDefaultInstance()) return this; if (other.hasInfo()) { mergeInfo(other.getInfo()); } if (other.hasStats()) { mergeStats(other.getStats()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasInfo()) { return false; } if (!hasStats()) { return false; } if (!getStats().isInitialized()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto 
parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.CachePoolInfoProto info = 1; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProtoOrBuilder> infoBuilder_; /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public boolean hasInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto getInfo() { if (infoBuilder_ == null) { return info_; } else { return infoBuilder_.getMessage(); } } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public Builder setInfo(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto value) { if (infoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } info_ = value; onChanged(); } else { infoBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public Builder setInfo( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.Builder builderForValue) { if (infoBuilder_ == null) { info_ = builderForValue.build(); onChanged(); } else { infoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public Builder mergeInfo(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto value) { if (infoBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && info_ != org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.getDefaultInstance()) { info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.newBuilder(info_).mergeFrom(value).buildPartial(); } else { info_ = value; } onChanged(); } else { infoBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public Builder clearInfo() { if (infoBuilder_ == null) { info_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.getDefaultInstance(); onChanged(); } else { infoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.Builder getInfoBuilder() { bitField0_ |= 0x00000001; onChanged(); return getInfoFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ public 
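/* [Illustrative note, not protoc output] setInfo() replaces the nested message
   outright, while mergeInfo() field-merges when one is already present,
   mirroring the wire-level behavior noted earlier (partialInfoA/partialInfoB
   are hypothetical, partially populated CachePoolInfoProto values):

     CachePoolEntryProto.Builder b = CachePoolEntryProto.newBuilder();
     b.setInfo(partialInfoA);    // replaces any previous value
     b.mergeInfo(partialInfoB);  // overlays B's set fields onto A's
*/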
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProtoOrBuilder getInfoOrBuilder() { if (infoBuilder_ != null) { return infoBuilder_.getMessageOrBuilder(); } else { return info_; } } /** * required .hadoop.hdfs.CachePoolInfoProto info = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProtoOrBuilder> getInfoFieldBuilder() { if (infoBuilder_ == null) { infoBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProtoOrBuilder>( info_, getParentForChildren(), isClean()); info_ = null; } return infoBuilder_; } // required .hadoop.hdfs.CachePoolStatsProto stats = 2; private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto stats_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProtoOrBuilder> statsBuilder_; /** * required .hadoop.hdfs.CachePoolStatsProto stats = 2; */ public boolean hasStats() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.CachePoolStatsProto stats = 2; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto getStats() { if (statsBuilder_ == null) { return stats_; } else { return statsBuilder_.getMessage(); } } /** * required .hadoop.hdfs.CachePoolStatsProto stats = 2; */ public Builder setStats(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto value) { if (statsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } stats_ = value; onChanged(); } else { statsBuilder_.setMessage(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.CachePoolStatsProto stats = 2; */ public Builder setStats( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto.Builder builderForValue) { if (statsBuilder_ == null) { stats_ = builderForValue.build(); onChanged(); } else { statsBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.CachePoolStatsProto stats = 2; */ public Builder mergeStats(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto value) { if (statsBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002) && stats_ != org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto.getDefaultInstance()) { stats_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto.newBuilder(stats_).mergeFrom(value).buildPartial(); } else { stats_ = value; } onChanged(); } else { statsBuilder_.mergeFrom(value); } 
bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.CachePoolStatsProto stats = 2; */ public Builder clearStats() { if (statsBuilder_ == null) { stats_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto.getDefaultInstance(); onChanged(); } else { statsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } /** * required .hadoop.hdfs.CachePoolStatsProto stats = 2; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto.Builder getStatsBuilder() { bitField0_ |= 0x00000002; onChanged(); return getStatsFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.CachePoolStatsProto stats = 2; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProtoOrBuilder getStatsOrBuilder() { if (statsBuilder_ != null) { return statsBuilder_.getMessageOrBuilder(); } else { return stats_; } } /** * required .hadoop.hdfs.CachePoolStatsProto stats = 2; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProtoOrBuilder> getStatsFieldBuilder() { if (statsBuilder_ == null) { statsBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProtoOrBuilder>( stats_, getParentForChildren(), isClean()); stats_ = null; } return statsBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CachePoolEntryProto) } static { defaultInstance = new CachePoolEntryProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.CachePoolEntryProto) } public interface GetFileLinkInfoRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; */ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); } /** * Protobuf type {@code hadoop.hdfs.GetFileLinkInfoRequestProto} */ public static final class GetFileLinkInfoRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetFileLinkInfoRequestProtoOrBuilder { // Use GetFileLinkInfoRequestProto.newBuilder() to construct. 
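// ---------------------------------------------------------------------------
// Illustrative sketch (editor's note, not generated code): hand-building the
// CachePoolEntryProto defined above. Both 'info' and 'stats' are required, and
// build() enforces that through isInitialized(); note that isInitialized()
// only recurses into stats_, because CachePoolStatsProto itself carries
// required fields while CachePoolInfoProto does not. The stats field names
// below (bytesNeeded, bytesCached, etc.) follow ClientNamenodeProtocol.proto
// and are assumptions here, since that message is defined elsewhere.
//
//   CachePoolEntryProto entry = CachePoolEntryProto.newBuilder()
//       .setInfo(CachePoolInfoProto.getDefaultInstance())
//       .setStats(CachePoolStatsProto.newBuilder()
//           .setBytesNeeded(0L).setBytesCached(0L).setBytesOverlimit(0L)
//           .setFilesNeeded(0L).setFilesCached(0L)
//           .build())
//       .build(); // throws UninitializedMessageException if a required field is unset
// ---------------------------------------------------------------------------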
private GetFileLinkInfoRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetFileLinkInfoRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetFileLinkInfoRequestProto defaultInstance; public static GetFileLinkInfoRequestProto getDefaultInstance() { return defaultInstance; } public GetFileLinkInfoRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetFileLinkInfoRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFileLinkInfoRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFileLinkInfoRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetFileLinkInfoRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetFileLinkInfoRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string src = 1; public static final int 
SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { src_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws 
io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetFileLinkInfoRequestProto} */ public static final class Builder 
extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFileLinkInfoRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFileLinkInfoRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFileLinkInfoRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetFileLinkInfoRequestProto) } static { defaultInstance = new GetFileLinkInfoRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetFileLinkInfoRequestProto) } public interface GetFileLinkInfoResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ boolean hasFs(); /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getFs(); /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ 
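// ---------------------------------------------------------------------------
// Illustrative sketch (editor's note, not generated code): round-tripping the
// GetFileLinkInfoRequestProto defined above. 'src' is its only field and is
// required, so build() fails until it is set. The path used here is made up.
//
//   GetFileLinkInfoRequestProto req = GetFileLinkInfoRequestProto.newBuilder()
//       .setSrc("/user/alice/my-symlink")
//       .build();
//   byte[] wire = req.toByteArray();
//   GetFileLinkInfoRequestProto back = GetFileLinkInfoRequestProto.parseFrom(wire);
//   assert back.getSrc().equals("/user/alice/my-symlink");
// ---------------------------------------------------------------------------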
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getFsOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.GetFileLinkInfoResponseProto} */ public static final class GetFileLinkInfoResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetFileLinkInfoResponseProtoOrBuilder { // Use GetFileLinkInfoResponseProto.newBuilder() to construct. private GetFileLinkInfoResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetFileLinkInfoResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetFileLinkInfoResponseProto defaultInstance; public static GetFileLinkInfoResponseProto getDefaultInstance() { return defaultInstance; } public GetFileLinkInfoResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetFileLinkInfoResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = fs_.toBuilder(); } fs_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(fs_); fs_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFileLinkInfoResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFileLinkInfoResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.class, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetFileLinkInfoResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetFileLinkInfoResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; public static final int FS_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto fs_; /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public boolean hasFs() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getFs() { return fs_; } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getFsOrBuilder() { return fs_; } private void initFields() { fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (hasFs()) { if (!getFs().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, fs_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, fs_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto) obj; boolean result = true; result = result && (hasFs() == other.hasFs()); if (hasFs()) { result = result && getFs() .equals(other.getFs()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int 
hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasFs()) { hash = (37 * hash) + FS_FIELD_NUMBER; hash = (53 * hash) + getFs().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder 
newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetFileLinkInfoResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFileLinkInfoResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFileLinkInfoResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getFsFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (fsBuilder_ == null) { fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); } else { fsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetFileLinkInfoResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto result = new 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (fsBuilder_ == null) { result.fs_ = fs_; } else { result.fs_ = fsBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.getDefaultInstance()) return this; if (other.hasFs()) { mergeFs(other.getFs()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (hasFs()) { if (!getFs().isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> fsBuilder_; /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public boolean hasFs() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getFs() { if (fsBuilder_ == null) { return fs_; } else { return fsBuilder_.getMessage(); } } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public Builder setFs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (fsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } fs_ = value; onChanged(); } else { fsBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public Builder setFs( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { if (fsBuilder_ == null) { fs_ = builderForValue.build(); onChanged(); 
} else { fsBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public Builder mergeFs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { if (fsBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && fs_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()) { fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder(fs_).mergeFrom(value).buildPartial(); } else { fs_ = value; } onChanged(); } else { fsBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public Builder clearFs() { if (fsBuilder_ == null) { fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); onChanged(); } else { fsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getFsBuilder() { bitField0_ |= 0x00000001; onChanged(); return getFsFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getFsOrBuilder() { if (fsBuilder_ != null) { return fsBuilder_.getMessageOrBuilder(); } else { return fs_; } } /** * optional .hadoop.hdfs.HdfsFileStatusProto fs = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> getFsFieldBuilder() { if (fsBuilder_ == null) { fsBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>( fs_, getParentForChildren(), isClean()); fs_ = null; } return fsBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetFileLinkInfoResponseProto) } static { defaultInstance = new GetFileLinkInfoResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetFileLinkInfoResponseProto) } public interface GetContentSummaryRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string path = 1; /** * required string path = 1; */ boolean hasPath(); /** * required string path = 1; */ java.lang.String getPath(); /** * required string path = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes(); } /** * Protobuf type {@code hadoop.hdfs.GetContentSummaryRequestProto} */ public static final class GetContentSummaryRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetContentSummaryRequestProtoOrBuilder { // Use GetContentSummaryRequestProto.newBuilder() to construct. 
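// ---------------------------------------------------------------------------
// Illustrative sketch (editor's note, not generated code): consuming the
// GetFileLinkInfoResponseProto defined above. Its 'fs' field is optional, so
// callers must guard with hasFs() before reading it; 'wire' stands for bytes
// received from the NameNode RPC layer and is assumed here.
//
//   GetFileLinkInfoResponseProto resp = GetFileLinkInfoResponseProto.parseFrom(wire);
//   if (resp.hasFs()) {
//     org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto fs = resp.getFs();
//     // inspect the returned file status...
//   } else {
//     // no status was returned for the requested path
//   }
// ---------------------------------------------------------------------------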
private GetContentSummaryRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetContentSummaryRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetContentSummaryRequestProto defaultInstance; public static GetContentSummaryRequestProto getDefaultInstance() { return defaultInstance; } public GetContentSummaryRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetContentSummaryRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; path_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetContentSummaryRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetContentSummaryRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetContentSummaryRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetContentSummaryRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string path = 1; 
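// ---------------------------------------------------------------------------
// Illustrative sketch (editor's note, not generated code): length-delimited
// framing. parseFrom() consumes an entire stream, so when several messages
// share one stream, pair writeDelimitedTo() with the parseDelimitedFrom()
// overloads generated above, which write and consume a varint length prefix.
// The 'out' and 'in' streams are placeholders.
//
//   req.writeDelimitedTo(out);                              // java.io.OutputStream
//   GetFileLinkInfoRequestProto next =
//       GetFileLinkInfoRequestProto.parseDelimitedFrom(in); // java.io.InputStream
//   // parseDelimitedFrom returns null once the stream is exhausted
// ---------------------------------------------------------------------------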
public static final int PATH_FIELD_NUMBER = 1; private java.lang.Object path_; /** * required string path = 1; */ public boolean hasPath() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string path = 1; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { path_ = s; } return s; } } /** * required string path = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { path_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasPath()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getPathBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getPathBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto) obj; boolean result = true; result = result && (hasPath() == other.hasPath()); if (hasPath()) { result = result && getPath() .equals(other.getPath()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasPath()) { hash = (37 * hash) + PATH_FIELD_NUMBER; hash = (53 * hash) + getPath().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws 
io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetContentSummaryRequestProto} */ public static 
final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetContentSummaryRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetContentSummaryRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); path_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetContentSummaryRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.path_ = path_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto.getDefaultInstance()) return this; if (other.hasPath()) { bitField0_ |= 0x00000001; path_ = other.path_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasPath()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string path = 1; private java.lang.Object path_ = ""; /** * required string path = 1; */ public boolean hasPath() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string path = 1; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); path_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string path = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string path = 1; */ public Builder setPath( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; path_ = value; onChanged(); return this; } /** * required string path = 1; */ public Builder clearPath() { bitField0_ = (bitField0_ & ~0x00000001); path_ = getDefaultInstance().getPath(); onChanged(); return this; } /** * required string path = 1; */ public Builder setPathBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; path_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetContentSummaryRequestProto) } static { defaultInstance = new GetContentSummaryRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetContentSummaryRequestProto) } public interface GetContentSummaryResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.ContentSummaryProto summary = 1; /** * required .hadoop.hdfs.ContentSummaryProto summary = 1; */ boolean hasSummary(); /** * required .hadoop.hdfs.ContentSummaryProto summary = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto 
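// ---------------------------------------------------------------------------
// Illustrative sketch (editor addition): a wire round trip for the request
// message completed above. toByteArray() comes from the protobuf runtime's
// AbstractMessageLite; parseFrom(byte[]) is the generated static method and
// throws InvalidProtocolBufferException on malformed or uninitialized input.
//
//   byte[] wire = request.toByteArray();
//   GetContentSummaryRequestProto parsed =
//       GetContentSummaryRequestProto.parseFrom(wire);
//   assert parsed.getPath().equals(request.getPath());
// ---------------------------------------------------------------------------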
getSummary(); /** * required .hadoop.hdfs.ContentSummaryProto summary = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProtoOrBuilder getSummaryOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.GetContentSummaryResponseProto} */ public static final class GetContentSummaryResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetContentSummaryResponseProtoOrBuilder { // Use GetContentSummaryResponseProto.newBuilder() to construct. private GetContentSummaryResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetContentSummaryResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetContentSummaryResponseProto defaultInstance; public static GetContentSummaryResponseProto getDefaultInstance() { return defaultInstance; } public GetContentSummaryResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetContentSummaryResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = summary_.toBuilder(); } summary_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(summary_); summary_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetContentSummaryResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetContentSummaryResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetContentSummaryResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetContentSummaryResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.ContentSummaryProto summary = 1; public static final int SUMMARY_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto summary_; /** * required .hadoop.hdfs.ContentSummaryProto summary = 1; */ public boolean hasSummary() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.ContentSummaryProto summary = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto getSummary() { return summary_; } /** * required .hadoop.hdfs.ContentSummaryProto summary = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProtoOrBuilder getSummaryOrBuilder() { return summary_; } private void initFields() { summary_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSummary()) { memoizedIsInitialized = 0; return false; } if (!getSummary().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, summary_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, summary_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto) obj; boolean result = true; result = result && (hasSummary() == other.hasSummary()); if (hasSummary()) { result = result && 
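// ---------------------------------------------------------------------------
// Illustrative sketch (editor addition): consuming a
// GetContentSummaryResponseProto, e.g. from an RPC payload. 'in' is a
// hypothetical java.io.InputStream; parseFrom fails with
// InvalidProtocolBufferException if the required 'summary' field is absent.
//
//   GetContentSummaryResponseProto response =
//       GetContentSummaryResponseProto.parseFrom(in);
//   org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto
//       summary = response.getSummary();   // accessors on ContentSummaryProto
//                                          // are defined in HdfsProtos, not here
// ---------------------------------------------------------------------------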
getSummary() .equals(other.getSummary()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSummary()) { hash = (37 * hash) + SUMMARY_FIELD_NUMBER; hash = (53 * hash) + getSummary().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetContentSummaryResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetContentSummaryResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetContentSummaryResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getSummaryFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (summaryBuilder_ == null) { summary_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance(); } else { summaryBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetContentSummaryResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public 
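// ---------------------------------------------------------------------------
// Illustrative sketch (editor addition): populating the required nested
// 'summary' field on the response Builder below. getDefaultInstance() is a
// placeholder value; note that ContentSummaryProto declares required fields of
// its own (in HdfsProtos), so a default instance will not pass isInitialized()
// and buildPartial() is used here to skip that check.
//
//   GetContentSummaryResponseProto partial =
//       GetContentSummaryResponseProto.newBuilder()
//           .setSummary(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos
//               .ContentSummaryProto.getDefaultInstance())
//           .buildPartial();
// ---------------------------------------------------------------------------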
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (summaryBuilder_ == null) { result.summary_ = summary_; } else { result.summary_ = summaryBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.getDefaultInstance()) return this; if (other.hasSummary()) { mergeSummary(other.getSummary()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSummary()) { return false; } if (!getSummary().isInitialized()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.ContentSummaryProto summary = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto summary_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProtoOrBuilder> summaryBuilder_; /** * required .hadoop.hdfs.ContentSummaryProto summary = 1; */ public boolean hasSummary() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.ContentSummaryProto summary = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto getSummary() { if (summaryBuilder_ == null) { return summary_; } else { return summaryBuilder_.getMessage(); } } /** * required .hadoop.hdfs.ContentSummaryProto summary = 1; */ public Builder setSummary(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto value) { if (summaryBuilder_ == null) { if (value == null) { throw new 
NullPointerException(); } summary_ = value; onChanged(); } else { summaryBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.ContentSummaryProto summary = 1; */ public Builder setSummary( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder builderForValue) { if (summaryBuilder_ == null) { summary_ = builderForValue.build(); onChanged(); } else { summaryBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.ContentSummaryProto summary = 1; */ public Builder mergeSummary(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto value) { if (summaryBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && summary_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance()) { summary_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.newBuilder(summary_).mergeFrom(value).buildPartial(); } else { summary_ = value; } onChanged(); } else { summaryBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.ContentSummaryProto summary = 1; */ public Builder clearSummary() { if (summaryBuilder_ == null) { summary_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance(); onChanged(); } else { summaryBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.ContentSummaryProto summary = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder getSummaryBuilder() { bitField0_ |= 0x00000001; onChanged(); return getSummaryFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.ContentSummaryProto summary = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProtoOrBuilder getSummaryOrBuilder() { if (summaryBuilder_ != null) { return summaryBuilder_.getMessageOrBuilder(); } else { return summary_; } } /** * required .hadoop.hdfs.ContentSummaryProto summary = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProtoOrBuilder> getSummaryFieldBuilder() { if (summaryBuilder_ == null) { summaryBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProtoOrBuilder>( summary_, getParentForChildren(), isClean()); summary_ = null; } return summaryBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetContentSummaryResponseProto) } static { defaultInstance = new GetContentSummaryResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetContentSummaryResponseProto) } public interface GetQuotaUsageRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string path = 1; /** * required string path = 1; */ boolean hasPath(); /** * required string path = 1; */ java.lang.String getPath(); /** * required string path = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes(); } /** * Protobuf type {@code 
hadoop.hdfs.GetQuotaUsageRequestProto} */ public static final class GetQuotaUsageRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetQuotaUsageRequestProtoOrBuilder { // Use GetQuotaUsageRequestProto.newBuilder() to construct. private GetQuotaUsageRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetQuotaUsageRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetQuotaUsageRequestProto defaultInstance; public static GetQuotaUsageRequestProto getDefaultInstance() { return defaultInstance; } public GetQuotaUsageRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetQuotaUsageRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; path_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetQuotaUsageRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetQuotaUsageRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetQuotaUsageRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new 
GetQuotaUsageRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string path = 1; public static final int PATH_FIELD_NUMBER = 1; private java.lang.Object path_; /** * required string path = 1; */ public boolean hasPath() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string path = 1; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { path_ = s; } return s; } } /** * required string path = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { path_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasPath()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getPathBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getPathBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto) obj; boolean result = true; result = result && (hasPath() == other.hasPath()); if (hasPath()) { result = result && getPath() .equals(other.getPath()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasPath()) { hash = (37 * hash) + PATH_FIELD_NUMBER; hash = (53 * hash) + getPath().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; 
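// ---------------------------------------------------------------------------
// Editor's note (illustrative): getPath() above decodes the lazily stored
// ByteString into a java.lang.String on first access, caching the result only
// when the bytes are valid UTF-8; getPathBytes() performs the reverse
// conversion. A minimal round trip with a hypothetical path value:
//
//   GetQuotaUsageRequestProto req =
//       GetQuotaUsageRequestProto.newBuilder().setPath("/tmp/dir").build();
//   io.prestosql.hadoop.$internal.com.google.protobuf.ByteString raw =
//       req.getPathBytes();
//   assert raw.toStringUtf8().equals(req.getPath());
// ---------------------------------------------------------------------------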
return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( 
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetQuotaUsageRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetQuotaUsageRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetQuotaUsageRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); path_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetQuotaUsageRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.path_ = path_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto) { return 
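// ---------------------------------------------------------------------------
// Illustrative sketch (editor addition): messages are immutable, so edits go
// through toBuilder(), which copies state into a fresh Builder like the one
// defined here. Path values are hypothetical.
//
//   GetQuotaUsageRequestProto original =
//       GetQuotaUsageRequestProto.newBuilder().setPath("/a").build();
//   GetQuotaUsageRequestProto changed =
//       original.toBuilder().setPath("/b").build();   // 'original' unchanged
// ---------------------------------------------------------------------------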
mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto.getDefaultInstance()) return this; if (other.hasPath()) { bitField0_ |= 0x00000001; path_ = other.path_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasPath()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string path = 1; private java.lang.Object path_ = ""; /** * required string path = 1; */ public boolean hasPath() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string path = 1; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); path_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string path = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string path = 1; */ public Builder setPath( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; path_ = value; onChanged(); return this; } /** * required string path = 1; */ public Builder clearPath() { bitField0_ = (bitField0_ & ~0x00000001); path_ = getDefaultInstance().getPath(); onChanged(); return this; } /** * required string path = 1; */ public Builder setPathBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; path_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetQuotaUsageRequestProto) } static { defaultInstance = new GetQuotaUsageRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetQuotaUsageRequestProto) } public interface GetQuotaUsageResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.QuotaUsageProto usage = 1; /** * required .hadoop.hdfs.QuotaUsageProto usage = 1; */ boolean hasUsage(); /** * required 
.hadoop.hdfs.QuotaUsageProto usage = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto getUsage(); /** * required .hadoop.hdfs.QuotaUsageProto usage = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProtoOrBuilder getUsageOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.GetQuotaUsageResponseProto} */ public static final class GetQuotaUsageResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetQuotaUsageResponseProtoOrBuilder { // Use GetQuotaUsageResponseProto.newBuilder() to construct. private GetQuotaUsageResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetQuotaUsageResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetQuotaUsageResponseProto defaultInstance; public static GetQuotaUsageResponseProto getDefaultInstance() { return defaultInstance; } public GetQuotaUsageResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetQuotaUsageResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = usage_.toBuilder(); } usage_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(usage_); usage_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetQuotaUsageResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetQuotaUsageResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetQuotaUsageResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetQuotaUsageResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.QuotaUsageProto usage = 1; public static final int USAGE_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto usage_; /** * required .hadoop.hdfs.QuotaUsageProto usage = 1; */ public boolean hasUsage() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.QuotaUsageProto usage = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto getUsage() { return usage_; } /** * required .hadoop.hdfs.QuotaUsageProto usage = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProtoOrBuilder getUsageOrBuilder() { return usage_; } private void initFields() { usage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasUsage()) { memoizedIsInitialized = 0; return false; } if (!getUsage().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, usage_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, usage_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto) obj; boolean result = true; result = result && (hasUsage() == other.hasUsage()); if (hasUsage()) { result = result && getUsage() .equals(other.getUsage()); } result = result && 
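// ---------------------------------------------------------------------------
// Illustrative sketch (editor addition): length-delimited framing, the format
// consumed by the static parseDelimitedFrom(...) methods below.
// writeDelimitedTo(...) is the matching writer from the protobuf runtime
// (MessageLite); 'out' and 'in' are hypothetical streams.
//
//   response.writeDelimitedTo(out);
//   GetQuotaUsageResponseProto next =
//       GetQuotaUsageResponseProto.parseDelimitedFrom(in);  // null at end of stream
// ---------------------------------------------------------------------------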
getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasUsage()) { hash = (37 * hash) + USAGE_FIELD_NUMBER; hash = (53 * hash) + getUsage().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder 
newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetQuotaUsageResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetQuotaUsageResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetQuotaUsageResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getUsageFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (usageBuilder_ == null) { usage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.getDefaultInstance(); } else { usageBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetQuotaUsageResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto buildPartial() { 
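// ---------------------------------------------------------------------------
// Illustrative sketch (editor addition): merge-versus-set semantics for the
// singular nested 'usage' field handled by this Builder. mergeUsage(...)
// (defined below) folds the argument field-by-field into any value already
// present, while setUsage(...) replaces it outright. 'partialUsage' and
// 'moreUsage' are hypothetical QuotaUsageProto values.
//
//   GetQuotaUsageResponseProto.Builder b = GetQuotaUsageResponseProto.newBuilder();
//   b.mergeUsage(partialUsage);  // first call behaves like setUsage
//   b.mergeUsage(moreUsage);     // subsequent calls merge into the existing value
// ---------------------------------------------------------------------------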
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (usageBuilder_ == null) { result.usage_ = usage_; } else { result.usage_ = usageBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto.getDefaultInstance()) return this; if (other.hasUsage()) { mergeUsage(other.getUsage()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasUsage()) { return false; } if (!getUsage().isInitialized()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.QuotaUsageProto usage = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto usage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProtoOrBuilder> usageBuilder_; /** * required .hadoop.hdfs.QuotaUsageProto usage = 1; */ public boolean hasUsage() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.QuotaUsageProto usage = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto getUsage() { if (usageBuilder_ == null) { return usage_; } else { return usageBuilder_.getMessage(); } } /** * required .hadoop.hdfs.QuotaUsageProto usage = 1; */ public Builder setUsage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto value) { if (usageBuilder_ == null) { if (value == null) { throw new NullPointerException(); } usage_ = value; onChanged(); } else { usageBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.QuotaUsageProto usage = 1; */ public Builder setUsage( 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.Builder builderForValue) { if (usageBuilder_ == null) { usage_ = builderForValue.build(); onChanged(); } else { usageBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.QuotaUsageProto usage = 1; */ public Builder mergeUsage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto value) { if (usageBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && usage_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.getDefaultInstance()) { usage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.newBuilder(usage_).mergeFrom(value).buildPartial(); } else { usage_ = value; } onChanged(); } else { usageBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.QuotaUsageProto usage = 1; */ public Builder clearUsage() { if (usageBuilder_ == null) { usage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.getDefaultInstance(); onChanged(); } else { usageBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.QuotaUsageProto usage = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.Builder getUsageBuilder() { bitField0_ |= 0x00000001; onChanged(); return getUsageFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.QuotaUsageProto usage = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProtoOrBuilder getUsageOrBuilder() { if (usageBuilder_ != null) { return usageBuilder_.getMessageOrBuilder(); } else { return usage_; } } /** * required .hadoop.hdfs.QuotaUsageProto usage = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProtoOrBuilder> getUsageFieldBuilder() { if (usageBuilder_ == null) { usageBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProtoOrBuilder>( usage_, getParentForChildren(), isClean()); usage_ = null; } return usageBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetQuotaUsageResponseProto) } static { defaultInstance = new GetQuotaUsageResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetQuotaUsageResponseProto) } public interface SetQuotaRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string path = 1; /** * required string path = 1; */ boolean hasPath(); /** * required string path = 1; */ java.lang.String getPath(); /** * required string path = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes(); // required uint64 namespaceQuota = 2; /** * required uint64 namespaceQuota = 2; */ boolean hasNamespaceQuota(); /** * required uint64 namespaceQuota = 2; */ long getNamespaceQuota(); // required uint64 storagespaceQuota = 3; /** * required uint64 storagespaceQuota = 3; */ boolean hasStoragespaceQuota(); /** * required uint64 storagespaceQuota = 3; */ long getStoragespaceQuota(); // optional .hadoop.hdfs.StorageTypeProto storageType 
= 4; /** * optional .hadoop.hdfs.StorageTypeProto storageType = 4; */ boolean hasStorageType(); /** * optional .hadoop.hdfs.StorageTypeProto storageType = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType(); } /** * Protobuf type {@code hadoop.hdfs.SetQuotaRequestProto} */ public static final class SetQuotaRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements SetQuotaRequestProtoOrBuilder { // Use SetQuotaRequestProto.newBuilder() to construct. private SetQuotaRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SetQuotaRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SetQuotaRequestProto defaultInstance; public static SetQuotaRequestProto getDefaultInstance() { return defaultInstance; } public SetQuotaRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SetQuotaRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; path_ = input.readBytes(); break; } case 16: { bitField0_ |= 0x00000002; namespaceQuota_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; storagespaceQuota_ = input.readUInt64(); break; } case 32: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(4, rawValue); } else { bitField0_ |= 0x00000008; storageType_ = value; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetQuotaRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetQuotaRequestProto_fieldAccessorTable 
.ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public SetQuotaRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new SetQuotaRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string path = 1; public static final int PATH_FIELD_NUMBER = 1; private java.lang.Object path_; /** * required string path = 1; */ public boolean hasPath() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string path = 1; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { path_ = s; } return s; } } /** * required string path = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required uint64 namespaceQuota = 2; public static final int NAMESPACEQUOTA_FIELD_NUMBER = 2; private long namespaceQuota_; /** * required uint64 namespaceQuota = 2; */ public boolean hasNamespaceQuota() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 namespaceQuota = 2; */ public long getNamespaceQuota() { return namespaceQuota_; } // required uint64 storagespaceQuota = 3; public static final int STORAGESPACEQUOTA_FIELD_NUMBER = 3; private long storagespaceQuota_; /** * required uint64 storagespaceQuota = 3; */ public boolean hasStoragespaceQuota() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 storagespaceQuota = 3; */ public long getStoragespaceQuota() { return storagespaceQuota_; } // optional .hadoop.hdfs.StorageTypeProto storageType = 4; public static final int STORAGETYPE_FIELD_NUMBER = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto storageType_; /** * optional .hadoop.hdfs.StorageTypeProto storageType = 4; */ public boolean hasStorageType() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional .hadoop.hdfs.StorageTypeProto storageType = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() { return storageType_; } private void initFields() { path_ = ""; namespaceQuota_ = 0L; storagespaceQuota_ = 0L; storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = 
memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasPath()) { memoizedIsInitialized = 0; return false; } if (!hasNamespaceQuota()) { memoizedIsInitialized = 0; return false; } if (!hasStoragespaceQuota()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getPathBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, namespaceQuota_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(3, storagespaceQuota_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeEnum(4, storageType_.getNumber()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getPathBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(2, namespaceQuota_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(3, storagespaceQuota_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeEnumSize(4, storageType_.getNumber()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto) obj; boolean result = true; result = result && (hasPath() == other.hasPath()); if (hasPath()) { result = result && getPath() .equals(other.getPath()); } result = result && (hasNamespaceQuota() == other.hasNamespaceQuota()); if (hasNamespaceQuota()) { result = result && (getNamespaceQuota() == other.getNamespaceQuota()); } result = result && (hasStoragespaceQuota() == other.hasStoragespaceQuota()); if (hasStoragespaceQuota()) { result = result && (getStoragespaceQuota() == other.getStoragespaceQuota()); } result = result && (hasStorageType() == other.hasStorageType()); if (hasStorageType()) { result = result && (getStorageType() == other.getStorageType()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasPath()) { hash = (37 * hash) + PATH_FIELD_NUMBER; hash = (53 * hash) + getPath().hashCode(); } if (hasNamespaceQuota()) { hash = (37 * hash) + NAMESPACEQUOTA_FIELD_NUMBER; hash 
= (53 * hash) + hashLong(getNamespaceQuota()); } if (hasStoragespaceQuota()) { hash = (37 * hash) + STORAGESPACEQUOTA_FIELD_NUMBER; hash = (53 * hash) + hashLong(getStoragespaceQuota()); } if (hasStorageType()) { hash = (37 * hash) + STORAGETYPE_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getStorageType()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder 
newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SetQuotaRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetQuotaRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetQuotaRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); path_ = ""; bitField0_ = (bitField0_ & ~0x00000001); namespaceQuota_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); storagespaceQuota_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK; bitField0_ = (bitField0_ & ~0x00000008); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetQuotaRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto(this); 
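        // Each has-bit is copied from the builder's bitField0_ into the message
        // alongside its field value below; a set bit is what makes hasPath(),
        // hasNamespaceQuota(), etc. return true on the built message.
        //
        // Illustrative client-side usage (editor's sketch, not part of the
        // generated file; the path and quota values are hypothetical):
        //
        //   SetQuotaRequestProto req = SetQuotaRequestProto.newBuilder()
        //       .setPath("/user/alice")
        //       .setNamespaceQuota(100000L)
        //       .setStoragespaceQuota(10L * 1024 * 1024 * 1024)
        //       .build(); // build() throws UninitializedMessageException if a
        //                 // required field (path, namespaceQuota,
        //                 // storagespaceQuota) is left unset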
int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.path_ = path_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.namespaceQuota_ = namespaceQuota_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.storagespaceQuota_ = storagespaceQuota_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.storageType_ = storageType_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto.getDefaultInstance()) return this; if (other.hasPath()) { bitField0_ |= 0x00000001; path_ = other.path_; onChanged(); } if (other.hasNamespaceQuota()) { setNamespaceQuota(other.getNamespaceQuota()); } if (other.hasStoragespaceQuota()) { setStoragespaceQuota(other.getStoragespaceQuota()); } if (other.hasStorageType()) { setStorageType(other.getStorageType()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasPath()) { return false; } if (!hasNamespaceQuota()) { return false; } if (!hasStoragespaceQuota()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string path = 1; private java.lang.Object path_ = ""; /** * required string path = 1; */ public boolean hasPath() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string path = 1; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); path_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string path = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string path = 1; */ public Builder 
setPath( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; path_ = value; onChanged(); return this; } /** * required string path = 1; */ public Builder clearPath() { bitField0_ = (bitField0_ & ~0x00000001); path_ = getDefaultInstance().getPath(); onChanged(); return this; } /** * required string path = 1; */ public Builder setPathBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; path_ = value; onChanged(); return this; } // required uint64 namespaceQuota = 2; private long namespaceQuota_ ; /** * required uint64 namespaceQuota = 2; */ public boolean hasNamespaceQuota() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 namespaceQuota = 2; */ public long getNamespaceQuota() { return namespaceQuota_; } /** * required uint64 namespaceQuota = 2; */ public Builder setNamespaceQuota(long value) { bitField0_ |= 0x00000002; namespaceQuota_ = value; onChanged(); return this; } /** * required uint64 namespaceQuota = 2; */ public Builder clearNamespaceQuota() { bitField0_ = (bitField0_ & ~0x00000002); namespaceQuota_ = 0L; onChanged(); return this; } // required uint64 storagespaceQuota = 3; private long storagespaceQuota_ ; /** * required uint64 storagespaceQuota = 3; */ public boolean hasStoragespaceQuota() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 storagespaceQuota = 3; */ public long getStoragespaceQuota() { return storagespaceQuota_; } /** * required uint64 storagespaceQuota = 3; */ public Builder setStoragespaceQuota(long value) { bitField0_ |= 0x00000004; storagespaceQuota_ = value; onChanged(); return this; } /** * required uint64 storagespaceQuota = 3; */ public Builder clearStoragespaceQuota() { bitField0_ = (bitField0_ & ~0x00000004); storagespaceQuota_ = 0L; onChanged(); return this; } // optional .hadoop.hdfs.StorageTypeProto storageType = 4; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK; /** * optional .hadoop.hdfs.StorageTypeProto storageType = 4; */ public boolean hasStorageType() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional .hadoop.hdfs.StorageTypeProto storageType = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() { return storageType_; } /** * optional .hadoop.hdfs.StorageTypeProto storageType = 4; */ public Builder setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; storageType_ = value; onChanged(); return this; } /** * optional .hadoop.hdfs.StorageTypeProto storageType = 4; */ public Builder clearStorageType() { bitField0_ = (bitField0_ & ~0x00000008); storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SetQuotaRequestProto) } static { defaultInstance = new SetQuotaRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SetQuotaRequestProto) } public interface SetQuotaResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.SetQuotaResponseProto} * *
   * <pre>
   * void response
   * </pre>
*/ public static final class SetQuotaResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements SetQuotaResponseProtoOrBuilder { // Use SetQuotaResponseProto.newBuilder() to construct. private SetQuotaResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SetQuotaResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SetQuotaResponseProto defaultInstance; public static SetQuotaResponseProto getDefaultInstance() { return defaultInstance; } public SetQuotaResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SetQuotaResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetQuotaResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetQuotaResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public SetQuotaResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new SetQuotaResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { 
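      // Intentionally empty: SetQuotaResponseProto is a "void" response and
      // declares no fields, so there is nothing to initialize.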
} private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SetQuotaResponseProto} * *
      * <pre>
      * void response
      * </pre>
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetQuotaResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetQuotaResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetQuotaResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; 
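        // always true: the message declares no required fields to validate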
} public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SetQuotaResponseProto) } static { defaultInstance = new SetQuotaResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SetQuotaResponseProto) } public interface FsyncRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; */ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); // required string client = 2; /** * required string client = 2; */ boolean hasClient(); /** * required string client = 2; */ java.lang.String getClient(); /** * required string client = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientBytes(); // optional sint64 lastBlockLength = 3 [default = -1]; /** * optional sint64 lastBlockLength = 3 [default = -1]; */ boolean hasLastBlockLength(); /** * optional sint64 lastBlockLength = 3 [default = -1]; */ long getLastBlockLength(); // optional uint64 fileId = 4 [default = 0]; /** * optional uint64 fileId = 4 [default = 0]; * *
      * <pre>
      * default to GRANDFATHER_INODE_ID
      * </pre>
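      * Editor's note (not generated text): GRANDFATHER_INODE_ID is the legacy
      * inode id 0 (matching this field's default), sent by clients that
      * predate per-file inode ids.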
*/ boolean hasFileId(); /** * optional uint64 fileId = 4 [default = 0]; * *
      * <pre>
      * default to GRANDFATHER_INODE_ID
      * </pre>
*/ long getFileId(); } /** * Protobuf type {@code hadoop.hdfs.FsyncRequestProto} */ public static final class FsyncRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements FsyncRequestProtoOrBuilder { // Use FsyncRequestProto.newBuilder() to construct. private FsyncRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private FsyncRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final FsyncRequestProto defaultInstance; public static FsyncRequestProto getDefaultInstance() { return defaultInstance; } public FsyncRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private FsyncRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; client_ = input.readBytes(); break; } case 24: { bitField0_ |= 0x00000004; lastBlockLength_ = input.readSInt64(); break; } case 32: { bitField0_ |= 0x00000008; fileId_ = input.readUInt64(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_FsyncRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_FsyncRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public FsyncRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new FsyncRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string src = 1; public static final int SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required string client = 2; public static final int CLIENT_FIELD_NUMBER = 2; private java.lang.Object client_; /** * required string client = 2; */ public boolean hasClient() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string client = 2; */ public java.lang.String getClient() { java.lang.Object ref = client_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { client_ = s; } return s; } } /** * required string client = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientBytes() { java.lang.Object ref = client_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); client_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // optional sint64 lastBlockLength = 3 [default = -1]; public static final int LASTBLOCKLENGTH_FIELD_NUMBER = 3; private long lastBlockLength_; /** * optional sint64 lastBlockLength = 3 [default = -1]; */ public boolean hasLastBlockLength() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional sint64 lastBlockLength = 3 [default = -1]; */ public long getLastBlockLength() { return lastBlockLength_; } // optional uint64 fileId = 4 [default = 0]; public static final int FILEID_FIELD_NUMBER = 4; private long fileId_; /** * optional uint64 fileId = 4 [default = 0]; * *
      * <pre>
      * default to GRANDFATHER_INODE_ID
      * </pre>
*/ public boolean hasFileId() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional uint64 fileId = 4 [default = 0]; * *
      * <pre>
      * default to GRANDFATHER_INODE_ID
      * </pre>
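      * Illustrative builder usage (editor's sketch, not generated code; the
      * src and client values are hypothetical):
      *   FsyncRequestProto req = FsyncRequestProto.newBuilder()
      *       .setSrc("/logs/app.log")
      *       .setClient("DFSClient_example")
      *       .build(); // fileId left unset, so getFileId() reports the default 0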
*/ public long getFileId() { return fileId_; } private void initFields() { src_ = ""; client_ = ""; lastBlockLength_ = -1L; fileId_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } if (!hasClient()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getClientBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeSInt64(3, lastBlockLength_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeUInt64(4, fileId_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(2, getClientBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeSInt64Size(3, lastBlockLength_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(4, fileId_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && (hasClient() == other.hasClient()); if (hasClient()) { result = result && getClient() .equals(other.getClient()); } result = result && (hasLastBlockLength() == other.hasLastBlockLength()); if (hasLastBlockLength()) { result = result && (getLastBlockLength() == other.getLastBlockLength()); } result = result && (hasFileId() == other.hasFileId()); if (hasFileId()) { result = result && (getFileId() == other.getFileId()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } if (hasClient()) { hash = (37 * hash) + 
CLIENT_FIELD_NUMBER; hash = (53 * hash) + getClient().hashCode(); } if (hasLastBlockLength()) { hash = (37 * hash) + LASTBLOCKLENGTH_FIELD_NUMBER; hash = (53 * hash) + hashLong(getLastBlockLength()); } if (hasFileId()) { hash = (37 * hash) + FILEID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getFileId()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder 
newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.FsyncRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_FsyncRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_FsyncRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); client_ = ""; bitField0_ = (bitField0_ & ~0x00000002); lastBlockLength_ = -1L; bitField0_ = (bitField0_ & ~0x00000004); fileId_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_FsyncRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 
0x00000001; } result.src_ = src_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.client_ = client_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.lastBlockLength_ = lastBlockLength_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.fileId_ = fileId_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } if (other.hasClient()) { bitField0_ |= 0x00000002; client_ = other.client_; onChanged(); } if (other.hasLastBlockLength()) { setLastBlockLength(other.getLastBlockLength()); } if (other.hasFileId()) { setFileId(other.getFileId()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } if (!hasClient()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); 
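        // Clearing the presence bit above makes hasSrc() report false; the
        // next statement then resets the field to its default value ("",
        // taken from the default instance).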
src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // required string client = 2; private java.lang.Object client_ = ""; /** * required string client = 2; */ public boolean hasClient() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string client = 2; */ public java.lang.String getClient() { java.lang.Object ref = client_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); client_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string client = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientBytes() { java.lang.Object ref = client_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); client_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string client = 2; */ public Builder setClient( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; client_ = value; onChanged(); return this; } /** * required string client = 2; */ public Builder clearClient() { bitField0_ = (bitField0_ & ~0x00000002); client_ = getDefaultInstance().getClient(); onChanged(); return this; } /** * required string client = 2; */ public Builder setClientBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; client_ = value; onChanged(); return this; } // optional sint64 lastBlockLength = 3 [default = -1]; private long lastBlockLength_ = -1L; /** * optional sint64 lastBlockLength = 3 [default = -1]; */ public boolean hasLastBlockLength() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional sint64 lastBlockLength = 3 [default = -1]; */ public long getLastBlockLength() { return lastBlockLength_; } /** * optional sint64 lastBlockLength = 3 [default = -1]; */ public Builder setLastBlockLength(long value) { bitField0_ |= 0x00000004; lastBlockLength_ = value; onChanged(); return this; } /** * optional sint64 lastBlockLength = 3 [default = -1]; */ public Builder clearLastBlockLength() { bitField0_ = (bitField0_ & ~0x00000004); lastBlockLength_ = -1L; onChanged(); return this; } // optional uint64 fileId = 4 [default = 0]; private long fileId_ ; /** * optional uint64 fileId = 4 [default = 0]; * *
       * default to GRANDFATHER_INODE_ID
       * 
*/ public boolean hasFileId() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional uint64 fileId = 4 [default = 0]; * *
       * default to GRANDFATHER_INODE_ID
       * 
*/ public long getFileId() { return fileId_; } /** * optional uint64 fileId = 4 [default = 0]; * *
       * default to GRANDFATHER_INODE_ID
       * 
*/ public Builder setFileId(long value) { bitField0_ |= 0x00000008; fileId_ = value; onChanged(); return this; } /** * optional uint64 fileId = 4 [default = 0]; * *
       * default to GRANDFATHER_INODE_ID
       * 
*/ public Builder clearFileId() { bitField0_ = (bitField0_ & ~0x00000008); fileId_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FsyncRequestProto) } static { defaultInstance = new FsyncRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.FsyncRequestProto) } public interface FsyncResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.FsyncResponseProto} * *
   * void response
   * 
*/ public static final class FsyncResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements FsyncResponseProtoOrBuilder { // Use FsyncResponseProto.newBuilder() to construct. private FsyncResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private FsyncResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final FsyncResponseProto defaultInstance; public static FsyncResponseProto getDefaultInstance() { return defaultInstance; } public FsyncResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private FsyncResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_FsyncResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_FsyncResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public FsyncResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new FsyncResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; 
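    // Usage sketch for the fsync message pair (illustrative only: the path
    // and client name below are placeholder values, not anything from this
    // file; FsyncResponseProto itself carries no fields).
    //
    //   FsyncRequestProto req = FsyncRequestProto.newBuilder()
    //       .setSrc("/user/alice/data.log")    // required string src = 1
    //       .setClient("DFSClient_example")    // required string client = 2
    //       .setLastBlockLength(-1L)           // optional; -1 (default) = unknown
    //       .setFileId(0L)                     // optional; 0 = GRANDFATHER_INODE_ID
    //       .build();                          // throws if a required field is unset
    //
    //   byte[] wire = req.toByteArray();                            // serialize
    //   FsyncRequestProto back = FsyncRequestProto.parseFrom(wire); // parse
    //   FsyncResponseProto ok = FsyncResponseProto.getDefaultInstance(); // empty reply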
public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto 
parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.FsyncResponseProto} * *
     * void response
     * 
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_FsyncResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_FsyncResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_FsyncResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( 
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FsyncResponseProto) } static { defaultInstance = new FsyncResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.FsyncResponseProto) } public interface SetTimesRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; */ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); // required uint64 mtime = 2; /** * required uint64 mtime = 2; */ boolean hasMtime(); /** * required uint64 mtime = 2; */ long getMtime(); // required uint64 atime = 3; /** * required uint64 atime = 3; */ boolean hasAtime(); /** * required uint64 atime = 3; */ long getAtime(); } /** * Protobuf type {@code hadoop.hdfs.SetTimesRequestProto} */ public static final class SetTimesRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements SetTimesRequestProtoOrBuilder { // Use SetTimesRequestProto.newBuilder() to construct. 
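    // Usage sketch (illustrative): all three SetTimesRequestProto fields are
    // required. In HDFS the mtime/atime values are milliseconds since the
    // epoch, matching FileSystem#setTimes, though the .proto itself only
    // declares them as uint64; "out" and "in" below are assumed streams.
    //
    //   SetTimesRequestProto req = SetTimesRequestProto.newBuilder()
    //       .setSrc("/user/alice/data.log")        // required string src = 1
    //       .setMtime(System.currentTimeMillis())  // required uint64 mtime = 2
    //       .setAtime(System.currentTimeMillis())  // required uint64 atime = 3
    //       .build();
    //
    //   req.writeDelimitedTo(out);                       // length-prefixed framing
    //   SetTimesRequestProto back =
    //       SetTimesRequestProto.parseDelimitedFrom(in); // its inverse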
private SetTimesRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SetTimesRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SetTimesRequestProto defaultInstance; public static SetTimesRequestProto getDefaultInstance() { return defaultInstance; } public SetTimesRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SetTimesRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } case 16: { bitField0_ |= 0x00000002; mtime_ = input.readUInt64(); break; } case 24: { bitField0_ |= 0x00000004; atime_ = input.readUInt64(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetTimesRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetTimesRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public SetTimesRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new SetTimesRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int 
bitField0_; // required string src = 1; public static final int SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required uint64 mtime = 2; public static final int MTIME_FIELD_NUMBER = 2; private long mtime_; /** * required uint64 mtime = 2; */ public boolean hasMtime() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 mtime = 2; */ public long getMtime() { return mtime_; } // required uint64 atime = 3; public static final int ATIME_FIELD_NUMBER = 3; private long atime_; /** * required uint64 atime = 3; */ public boolean hasAtime() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 atime = 3; */ public long getAtime() { return atime_; } private void initFields() { src_ = ""; mtime_ = 0L; atime_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } if (!hasMtime()) { memoizedIsInitialized = 0; return false; } if (!hasAtime()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, mtime_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(3, atime_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(2, mtime_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeUInt64Size(3, atime_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { 
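      // Two messages compare equal only when they agree on field presence
      // (hasX()), on the values of every present field, and on their
      // unknown-field sets, as the generated checks below spell out.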
if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && (hasMtime() == other.hasMtime()); if (hasMtime()) { result = result && (getMtime() == other.getMtime()); } result = result && (hasAtime() == other.hasAtime()); if (hasAtime()) { result = result && (getAtime() == other.getAtime()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } if (hasMtime()) { hash = (37 * hash) + MTIME_FIELD_NUMBER; hash = (53 * hash) + hashLong(getMtime()); } if (hasAtime()) { hash = (37 * hash) + ATIME_FIELD_NUMBER; hash = (53 * hash) + hashLong(getAtime()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SetTimesRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetTimesRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetTimesRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); mtime_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); atime_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetTimesRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.mtime_ = mtime_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.atime_ = atime_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } if (other.hasMtime()) { setMtime(other.getMtime()); } if (other.hasAtime()) { setAtime(other.getAtime()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } if (!hasMtime()) { return false; } if (!hasAtime()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { 
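        // The field was captured off the wire as a ByteString; decode it to
        // a java.lang.String here and cache it back into src_ so later calls
        // can return the String directly.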
java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // required uint64 mtime = 2; private long mtime_ ; /** * required uint64 mtime = 2; */ public boolean hasMtime() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required uint64 mtime = 2; */ public long getMtime() { return mtime_; } /** * required uint64 mtime = 2; */ public Builder setMtime(long value) { bitField0_ |= 0x00000002; mtime_ = value; onChanged(); return this; } /** * required uint64 mtime = 2; */ public Builder clearMtime() { bitField0_ = (bitField0_ & ~0x00000002); mtime_ = 0L; onChanged(); return this; } // required uint64 atime = 3; private long atime_ ; /** * required uint64 atime = 3; */ public boolean hasAtime() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required uint64 atime = 3; */ public long getAtime() { return atime_; } /** * required uint64 atime = 3; */ public Builder setAtime(long value) { bitField0_ |= 0x00000004; atime_ = value; onChanged(); return this; } /** * required uint64 atime = 3; */ public Builder clearAtime() { bitField0_ = (bitField0_ & ~0x00000004); atime_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SetTimesRequestProto) } static { defaultInstance = new SetTimesRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SetTimesRequestProto) } public interface SetTimesResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.SetTimesResponseProto} * *
   * void response
   * 
*/ public static final class SetTimesResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements SetTimesResponseProtoOrBuilder { // Use SetTimesResponseProto.newBuilder() to construct. private SetTimesResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SetTimesResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SetTimesResponseProto defaultInstance; public static SetTimesResponseProto getDefaultInstance() { return defaultInstance; } public SetTimesResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SetTimesResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetTimesResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetTimesResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public SetTimesResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new SetTimesResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { 
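      // SetTimesResponseProto declares no fields (a "void response"), so
      // there is nothing to initialize.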
} private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SetTimesResponseProto} * *
     * void response
     * 
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetTimesResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetTimesResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetTimesResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; 
} public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SetTimesResponseProto) } static { defaultInstance = new SetTimesResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SetTimesResponseProto) } public interface CreateSymlinkRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string target = 1; /** * required string target = 1; */ boolean hasTarget(); /** * required string target = 1; */ java.lang.String getTarget(); /** * required string target = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getTargetBytes(); // required string link = 2; /** * required string link = 2; */ boolean hasLink(); /** * required string link = 2; */ java.lang.String getLink(); /** * required string link = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getLinkBytes(); // required .hadoop.hdfs.FsPermissionProto dirPerm = 3; /** * required .hadoop.hdfs.FsPermissionProto dirPerm = 3; */ boolean hasDirPerm(); /** * required .hadoop.hdfs.FsPermissionProto dirPerm = 3; */ org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getDirPerm(); /** * required .hadoop.hdfs.FsPermissionProto dirPerm = 3; */ org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getDirPermOrBuilder(); // required bool createParent = 4; /** * required bool createParent = 4; */ boolean hasCreateParent(); /** * required bool createParent = 4; */ boolean getCreateParent(); } /** * Protobuf type {@code hadoop.hdfs.CreateSymlinkRequestProto} */ public static final class CreateSymlinkRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements CreateSymlinkRequestProtoOrBuilder { // Use CreateSymlinkRequestProto.newBuilder() to construct. 
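    // A minimal usage sketch, not part of the generated output: build,
    // serialize, and re-parse a CreateSymlinkRequestProto. It assumes
    // AclProtos.FsPermissionProto exposes setPerm(int) for its required
    // uint32 perm field; the paths and mode below are illustrative only.
    //
    //   CreateSymlinkRequestProto req = CreateSymlinkRequestProto.newBuilder()
    //       .setTarget("/user/alice/data")
    //       .setLink("/user/alice/data-link")
    //       .setDirPerm(org.apache.hadoop.hdfs.protocol.proto.AclProtos
    //           .FsPermissionProto.newBuilder().setPerm(0755).build())
    //       .setCreateParent(true)
    //       .build();                    // throws if any required field is unset
    //   byte[] wire = req.toByteArray();
    //   CreateSymlinkRequestProto copy = CreateSymlinkRequestProto.parseFrom(wire);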
private CreateSymlinkRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private CreateSymlinkRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final CreateSymlinkRequestProto defaultInstance; public static CreateSymlinkRequestProto getDefaultInstance() { return defaultInstance; } public CreateSymlinkRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CreateSymlinkRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; target_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; link_ = input.readBytes(); break; } case 26: { org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder subBuilder = null; if (((bitField0_ & 0x00000004) == 0x00000004)) { subBuilder = dirPerm_.toBuilder(); } dirPerm_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(dirPerm_); dirPerm_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000004; break; } case 32: { bitField0_ |= 0x00000008; createParent_ = input.readBool(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateSymlinkRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateSymlinkRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { 
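      // parsePartialFrom delegates to the stream-reading constructor above and
      // may return a message whose required fields are unset; the public
      // parseFrom(...) entry points reject such messages afterwards via
      // isInitialized().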
public CreateSymlinkRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new CreateSymlinkRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string target = 1; public static final int TARGET_FIELD_NUMBER = 1; private java.lang.Object target_; /** * required string target = 1; */ public boolean hasTarget() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string target = 1; */ public java.lang.String getTarget() { java.lang.Object ref = target_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { target_ = s; } return s; } } /** * required string target = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getTargetBytes() { java.lang.Object ref = target_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); target_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required string link = 2; public static final int LINK_FIELD_NUMBER = 2; private java.lang.Object link_; /** * required string link = 2; */ public boolean hasLink() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string link = 2; */ public java.lang.String getLink() { java.lang.Object ref = link_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { link_ = s; } return s; } } /** * required string link = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getLinkBytes() { java.lang.Object ref = link_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); link_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required .hadoop.hdfs.FsPermissionProto dirPerm = 3; public static final int DIRPERM_FIELD_NUMBER = 3; private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto dirPerm_; /** * required .hadoop.hdfs.FsPermissionProto dirPerm = 3; */ public boolean hasDirPerm() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required .hadoop.hdfs.FsPermissionProto dirPerm = 3; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getDirPerm() { return dirPerm_; } /** * required .hadoop.hdfs.FsPermissionProto dirPerm = 3; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getDirPermOrBuilder() { return dirPerm_; } // required bool createParent = 4; public static final int CREATEPARENT_FIELD_NUMBER = 4; private boolean createParent_; /** * required 
bool createParent = 4; */ public boolean hasCreateParent() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required bool createParent = 4; */ public boolean getCreateParent() { return createParent_; } private void initFields() { target_ = ""; link_ = ""; dirPerm_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); createParent_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasTarget()) { memoizedIsInitialized = 0; return false; } if (!hasLink()) { memoizedIsInitialized = 0; return false; } if (!hasDirPerm()) { memoizedIsInitialized = 0; return false; } if (!hasCreateParent()) { memoizedIsInitialized = 0; return false; } if (!getDirPerm().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getTargetBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getLinkBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeMessage(3, dirPerm_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeBool(4, createParent_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getTargetBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(2, getLinkBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(3, dirPerm_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(4, createParent_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto) obj; boolean result = true; result = result && (hasTarget() == other.hasTarget()); if (hasTarget()) { result = result && getTarget() .equals(other.getTarget()); } result = result && (hasLink() == other.hasLink()); if (hasLink()) { result = result && getLink() .equals(other.getLink()); } result = result && (hasDirPerm() == other.hasDirPerm()); if (hasDirPerm()) { result = result && getDirPerm() .equals(other.getDirPerm()); } result = result && (hasCreateParent() == other.hasCreateParent()); if (hasCreateParent()) { result = result && 
(getCreateParent() == other.getCreateParent()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasTarget()) { hash = (37 * hash) + TARGET_FIELD_NUMBER; hash = (53 * hash) + getTarget().hashCode(); } if (hasLink()) { hash = (37 * hash) + LINK_FIELD_NUMBER; hash = (53 * hash) + getLink().hashCode(); } if (hasDirPerm()) { hash = (37 * hash) + DIRPERM_FIELD_NUMBER; hash = (53 * hash) + getDirPerm().hashCode(); } if (hasCreateParent()) { hash = (37 * hash) + CREATEPARENT_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getCreateParent()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return 
PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.CreateSymlinkRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateSymlinkRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateSymlinkRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getDirPermFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); target_ = ""; bitField0_ = (bitField0_ & ~0x00000001); link_ = ""; bitField0_ = (bitField0_ & ~0x00000002); if (dirPermBuilder_ == null) { dirPerm_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); } else { dirPermBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); createParent_ = false; bitField0_ = (bitField0_ & ~0x00000008); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateSymlinkRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto getDefaultInstanceForType() { return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.target_ = target_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.link_ = link_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } if (dirPermBuilder_ == null) { result.dirPerm_ = dirPerm_; } else { result.dirPerm_ = dirPermBuilder_.build(); } if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.createParent_ = createParent_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto.getDefaultInstance()) return this; if (other.hasTarget()) { bitField0_ |= 0x00000001; target_ = other.target_; onChanged(); } if (other.hasLink()) { bitField0_ |= 0x00000002; link_ = other.link_; onChanged(); } if (other.hasDirPerm()) { mergeDirPerm(other.getDirPerm()); } if (other.hasCreateParent()) { setCreateParent(other.getCreateParent()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasTarget()) { return false; } if (!hasLink()) { return false; } if (!hasDirPerm()) { return false; } if (!hasCreateParent()) { return false; } if (!getDirPerm().isInitialized()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string target = 1; private java.lang.Object target_ = ""; /** * required 
string target = 1; */ public boolean hasTarget() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string target = 1; */ public java.lang.String getTarget() { java.lang.Object ref = target_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); target_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string target = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getTargetBytes() { java.lang.Object ref = target_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); target_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string target = 1; */ public Builder setTarget( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; target_ = value; onChanged(); return this; } /** * required string target = 1; */ public Builder clearTarget() { bitField0_ = (bitField0_ & ~0x00000001); target_ = getDefaultInstance().getTarget(); onChanged(); return this; } /** * required string target = 1; */ public Builder setTargetBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; target_ = value; onChanged(); return this; } // required string link = 2; private java.lang.Object link_ = ""; /** * required string link = 2; */ public boolean hasLink() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string link = 2; */ public java.lang.String getLink() { java.lang.Object ref = link_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); link_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string link = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getLinkBytes() { java.lang.Object ref = link_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); link_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string link = 2; */ public Builder setLink( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; link_ = value; onChanged(); return this; } /** * required string link = 2; */ public Builder clearLink() { bitField0_ = (bitField0_ & ~0x00000002); link_ = getDefaultInstance().getLink(); onChanged(); return this; } /** * required string link = 2; */ public Builder setLinkBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; link_ = value; onChanged(); return this; } // required .hadoop.hdfs.FsPermissionProto dirPerm = 3; private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto dirPerm_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> dirPermBuilder_; /** * required .hadoop.hdfs.FsPermissionProto dirPerm = 3; */ public boolean hasDirPerm() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required .hadoop.hdfs.FsPermissionProto dirPerm = 3; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getDirPerm() { if (dirPermBuilder_ == null) { return dirPerm_; } else { return dirPermBuilder_.getMessage(); } } /** * required .hadoop.hdfs.FsPermissionProto dirPerm = 3; */ public Builder setDirPerm(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) { if (dirPermBuilder_ == null) { if (value == null) { throw new NullPointerException(); } dirPerm_ = value; onChanged(); } else { dirPermBuilder_.setMessage(value); } bitField0_ |= 0x00000004; return this; } /** * required .hadoop.hdfs.FsPermissionProto dirPerm = 3; */ public Builder setDirPerm( org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder builderForValue) { if (dirPermBuilder_ == null) { dirPerm_ = builderForValue.build(); onChanged(); } else { dirPermBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000004; return this; } /** * required .hadoop.hdfs.FsPermissionProto dirPerm = 3; */ public Builder mergeDirPerm(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) { if (dirPermBuilder_ == null) { if (((bitField0_ & 0x00000004) == 0x00000004) && dirPerm_ != org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance()) { dirPerm_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.newBuilder(dirPerm_).mergeFrom(value).buildPartial(); } else { dirPerm_ = value; } onChanged(); } else { dirPermBuilder_.mergeFrom(value); } bitField0_ |= 0x00000004; return this; } /** * required .hadoop.hdfs.FsPermissionProto dirPerm = 3; */ public Builder clearDirPerm() { if (dirPermBuilder_ == null) { dirPerm_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance(); onChanged(); } else { dirPermBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); return this; } /** * required .hadoop.hdfs.FsPermissionProto dirPerm = 3; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder getDirPermBuilder() { bitField0_ |= 0x00000004; onChanged(); return getDirPermFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.FsPermissionProto dirPerm = 3; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getDirPermOrBuilder() { if (dirPermBuilder_ != null) { return dirPermBuilder_.getMessageOrBuilder(); } else { return dirPerm_; } } /** * required .hadoop.hdfs.FsPermissionProto dirPerm = 3; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> getDirPermFieldBuilder() { if (dirPermBuilder_ == null) { dirPermBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder>( dirPerm_, getParentForChildren(), isClean()); dirPerm_ = null; } 
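      // From this point on the SingleFieldBuilder is the single source of
      // truth for dirPerm: the plain dirPerm_ field was handed to the builder
      // and nulled out above, so all further reads and writes go through
      // dirPermBuilder_.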
return dirPermBuilder_; } // required bool createParent = 4; private boolean createParent_ ; /** * required bool createParent = 4; */ public boolean hasCreateParent() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required bool createParent = 4; */ public boolean getCreateParent() { return createParent_; } /** * required bool createParent = 4; */ public Builder setCreateParent(boolean value) { bitField0_ |= 0x00000008; createParent_ = value; onChanged(); return this; } /** * required bool createParent = 4; */ public Builder clearCreateParent() { bitField0_ = (bitField0_ & ~0x00000008); createParent_ = false; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CreateSymlinkRequestProto) } static { defaultInstance = new CreateSymlinkRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.CreateSymlinkRequestProto) } public interface CreateSymlinkResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.CreateSymlinkResponseProto} * *
   * <pre>
   * void response
   * </pre>
*/ public static final class CreateSymlinkResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements CreateSymlinkResponseProtoOrBuilder { // Use CreateSymlinkResponseProto.newBuilder() to construct. private CreateSymlinkResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private CreateSymlinkResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final CreateSymlinkResponseProto defaultInstance; public static CreateSymlinkResponseProto getDefaultInstance() { return defaultInstance; } public CreateSymlinkResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CreateSymlinkResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateSymlinkResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateSymlinkResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public CreateSymlinkResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new CreateSymlinkResponseProto(input, extensionRegistry); } }; @java.lang.Override public 
io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto parseFrom( java.io.InputStream input, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.CreateSymlinkResponseProto} * *
     * <pre>
     * void response
     * </pre>
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateSymlinkResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateSymlinkResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateSymlinkResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.getDefaultInstance()) return this; 
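        // CreateSymlinkResponseProto declares no fields, so merging another
        // instance only needs to carry over its unknown fields.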
this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CreateSymlinkResponseProto) } static { defaultInstance = new CreateSymlinkResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.CreateSymlinkResponseProto) } public interface GetLinkTargetRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string path = 1; /** * required string path = 1; */ boolean hasPath(); /** * required string path = 1; */ java.lang.String getPath(); /** * required string path = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes(); } /** * Protobuf type {@code hadoop.hdfs.GetLinkTargetRequestProto} */ public static final class GetLinkTargetRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetLinkTargetRequestProtoOrBuilder { // Use GetLinkTargetRequestProto.newBuilder() to construct. 
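    // A minimal usage sketch, not part of the generated output: the request
    // carries a single required path (the path value here is illustrative).
    //
    //   GetLinkTargetRequestProto req = GetLinkTargetRequestProto.newBuilder()
    //       .setPath("/user/alice/data-link")
    //       .build();
    //   // on the wire: one length-delimited field, tag byte 0x0A (field 1, wire type 2)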
private GetLinkTargetRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetLinkTargetRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetLinkTargetRequestProto defaultInstance; public static GetLinkTargetRequestProto getDefaultInstance() { return defaultInstance; } public GetLinkTargetRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetLinkTargetRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; path_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetLinkTargetRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetLinkTargetRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetLinkTargetRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetLinkTargetRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string path = 1; public static final int PATH_FIELD_NUMBER = 1; 
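    // path_ holds either a java.lang.String or a ByteString: the parser
    // stores the raw bytes, and getPath() decodes UTF-8 lazily, caching the
    // decoded String only when the bytes are valid UTF-8.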
private java.lang.Object path_; /** * required string path = 1; */ public boolean hasPath() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string path = 1; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { path_ = s; } return s; } } /** * required string path = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { path_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasPath()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getPathBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getPathBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto) obj; boolean result = true; result = result && (hasPath() == other.hasPath()); if (hasPath()) { result = result && getPath() .equals(other.getPath()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasPath()) { hash = (37 * hash) + PATH_FIELD_NUMBER; hash = (53 * hash) + getPath().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws 
io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetLinkTargetRequestProto} */ public static final class Builder extends 
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetLinkTargetRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetLinkTargetRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); path_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetLinkTargetRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.path_ = path_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder 
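/*
 * Note on the Builder above (sketch; the behavior follows directly from the
 * generated code): build() delegates to buildPartial() and then rejects any
 * result whose required fields are unset, while buildPartial() never throws:
 *
 *   GetLinkTargetRequestProto.Builder b = GetLinkTargetRequestProto.newBuilder();
 *   GetLinkTargetRequestProto partial = b.buildPartial();  // succeeds
 *   boolean ok = partial.isInitialized();                  // false: path unset
 *   // b.build() here would throw UninitializedMessageException
 */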
mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto.getDefaultInstance()) return this; if (other.hasPath()) { bitField0_ |= 0x00000001; path_ = other.path_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasPath()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string path = 1; private java.lang.Object path_ = ""; /** * required string path = 1; */ public boolean hasPath() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string path = 1; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); path_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string path = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string path = 1; */ public Builder setPath( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; path_ = value; onChanged(); return this; } /** * required string path = 1; */ public Builder clearPath() { bitField0_ = (bitField0_ & ~0x00000001); path_ = getDefaultInstance().getPath(); onChanged(); return this; } /** * required string path = 1; */ public Builder setPathBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; path_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetLinkTargetRequestProto) } static { defaultInstance = new GetLinkTargetRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetLinkTargetRequestProto) } public interface GetLinkTargetResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // optional string targetPath = 1; /** * optional string targetPath = 1; */ boolean hasTargetPath(); /** * optional string targetPath = 1; */ java.lang.String getTargetPath(); /** * optional string targetPath = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getTargetPathBytes(); } /** * Protobuf type 
{@code hadoop.hdfs.GetLinkTargetResponseProto} */ public static final class GetLinkTargetResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetLinkTargetResponseProtoOrBuilder { // Use GetLinkTargetResponseProto.newBuilder() to construct. private GetLinkTargetResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetLinkTargetResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetLinkTargetResponseProto defaultInstance; public static GetLinkTargetResponseProto getDefaultInstance() { return defaultInstance; } public GetLinkTargetResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetLinkTargetResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; targetPath_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetLinkTargetResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetLinkTargetResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetLinkTargetResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
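/*
 * Wire-format note (worked example): the `case 10` in the parsing constructor
 * above is the tag of targetPath, computed as
 * (field_number << 3) | wire_type = (1 << 3) | 2 = 10, where wire type 2 means
 * length-delimited (strings, bytes, embedded messages). A tag of 0 is invalid
 * on the wire and therefore signals end of input: `case 0: done = true`.
 */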
io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetLinkTargetResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // optional string targetPath = 1; public static final int TARGETPATH_FIELD_NUMBER = 1; private java.lang.Object targetPath_; /** * optional string targetPath = 1; */ public boolean hasTargetPath() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string targetPath = 1; */ public java.lang.String getTargetPath() { java.lang.Object ref = targetPath_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { targetPath_ = s; } return s; } } /** * optional string targetPath = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getTargetPathBytes() { java.lang.Object ref = targetPath_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); targetPath_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { targetPath_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getTargetPathBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getTargetPathBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto) obj; boolean result = true; result = result && (hasTargetPath() == other.hasTargetPath()); if (hasTargetPath()) { result = result && getTargetPath() .equals(other.getTargetPath()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasTargetPath()) 
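/*
 * Illustrative sketch: targetPath is optional, so callers should consult the
 * hazzer before reading; getTargetPath() on an absent field returns the
 * default "" rather than null:
 *
 *   GetLinkTargetResponseProto resp = ...;   // e.g. returned by an RPC stub
 *   String target = resp.hasTargetPath() ? resp.getTargetPath() : null;
 */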
{ hash = (37 * hash) + TARGETPATH_FIELD_NUMBER; hash = (53 * hash) + getTargetPath().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto prototype) { return 
newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetLinkTargetResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetLinkTargetResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetLinkTargetResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); targetPath_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetLinkTargetResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.targetPath_ = targetPath_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder 
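/*
 * Illustrative sketch: messages are immutable, so toBuilder() above is the
 * idiomatic way to derive a modified copy:
 *
 *   GetLinkTargetResponseProto updated = resp.toBuilder()
 *       .setTargetPath("/actual/target")
 *       .build();   // no required fields here, so build() cannot fail
 */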
mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.getDefaultInstance()) return this; if (other.hasTargetPath()) { bitField0_ |= 0x00000001; targetPath_ = other.targetPath_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // optional string targetPath = 1; private java.lang.Object targetPath_ = ""; /** * optional string targetPath = 1; */ public boolean hasTargetPath() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string targetPath = 1; */ public java.lang.String getTargetPath() { java.lang.Object ref = targetPath_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); targetPath_ = s; return s; } else { return (java.lang.String) ref; } } /** * optional string targetPath = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getTargetPathBytes() { java.lang.Object ref = targetPath_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); targetPath_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * optional string targetPath = 1; */ public Builder setTargetPath( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; targetPath_ = value; onChanged(); return this; } /** * optional string targetPath = 1; */ public Builder clearTargetPath() { bitField0_ = (bitField0_ & ~0x00000001); targetPath_ = getDefaultInstance().getTargetPath(); onChanged(); return this; } /** * optional string targetPath = 1; */ public Builder setTargetPathBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; targetPath_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetLinkTargetResponseProto) } static { defaultInstance = new GetLinkTargetResponseProto(true); defaultInstance.initFields(); } // 
@@protoc_insertion_point(class_scope:hadoop.hdfs.GetLinkTargetResponseProto) } public interface UpdateBlockForPipelineRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.ExtendedBlockProto block = 1; /** * required .hadoop.hdfs.ExtendedBlockProto block = 1; */ boolean hasBlock(); /** * required .hadoop.hdfs.ExtendedBlockProto block = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock(); /** * required .hadoop.hdfs.ExtendedBlockProto block = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder(); // required string clientName = 2; /** * required string clientName = 2; */ boolean hasClientName(); /** * required string clientName = 2; */ java.lang.String getClientName(); /** * required string clientName = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes(); } /** * Protobuf type {@code hadoop.hdfs.UpdateBlockForPipelineRequestProto} */ public static final class UpdateBlockForPipelineRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements UpdateBlockForPipelineRequestProtoOrBuilder { // Use UpdateBlockForPipelineRequestProto.newBuilder() to construct. private UpdateBlockForPipelineRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private UpdateBlockForPipelineRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final UpdateBlockForPipelineRequestProto defaultInstance; public static UpdateBlockForPipelineRequestProto getDefaultInstance() { return defaultInstance; } public UpdateBlockForPipelineRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private UpdateBlockForPipelineRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = block_.toBuilder(); } block_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(block_); block_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 18: { bitField0_ |= 0x00000002; clientName_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw 
e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpdateBlockForPipelineRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpdateBlockForPipelineRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public UpdateBlockForPipelineRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new UpdateBlockForPipelineRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.ExtendedBlockProto block = 1; public static final int BLOCK_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_; /** * required .hadoop.hdfs.ExtendedBlockProto block = 1; */ public boolean hasBlock() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.ExtendedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() { return block_; } /** * required .hadoop.hdfs.ExtendedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() { return block_; } // required string clientName = 2; public static final int CLIENTNAME_FIELD_NUMBER = 2; private java.lang.Object clientName_; /** * required string clientName = 2; */ public boolean hasClientName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string clientName = 2; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { clientName_ = s; } return s; } } /** * required string clientName = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else 
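/*
 * Illustrative sketch (field values are made up; poolId/blockId/generationStamp
 * are assumed to be ExtendedBlockProto's required fields): both fields of
 * UpdateBlockForPipelineRequestProto are required, and block is itself a
 * message that must be initialized:
 *
 *   org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto blk =
 *       org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder()
 *           .setPoolId("BP-1")
 *           .setBlockId(1073741825L)
 *           .setGenerationStamp(1001L)
 *           .build();
 *   UpdateBlockForPipelineRequestProto req2 =
 *       UpdateBlockForPipelineRequestProto.newBuilder()
 *           .setBlock(blk)
 *           .setClientName("DFSClient_example_1")
 *           .build();
 */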
{ return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); clientName_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasBlock()) { memoizedIsInitialized = 0; return false; } if (!hasClientName()) { memoizedIsInitialized = 0; return false; } if (!getBlock().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, block_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getClientNameBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, block_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(2, getClientNameBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto) obj; boolean result = true; result = result && (hasBlock() == other.hasBlock()); if (hasBlock()) { result = result && getBlock() .equals(other.getBlock()); } result = result && (hasClientName() == other.hasClientName()); if (hasClientName()) { result = result && getClientName() .equals(other.getClientName()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasBlock()) { hash = (37 * hash) + BLOCK_FIELD_NUMBER; hash = (53 * hash) + getBlock().hashCode(); } if (hasClientName()) { hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER; hash = (53 * hash) + getClientName().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
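/*
 * Note (follows from the generated isInitialized() above): initialization is
 * checked recursively, so a request whose block is present but itself missing
 * required fields is still uninitialized:
 *
 *   UpdateBlockForPipelineRequestProto.Builder rb =
 *       UpdateBlockForPipelineRequestProto.newBuilder()
 *           .setClientName("c")
 *           .setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos
 *               .ExtendedBlockProto.newBuilder().buildPartial());
 *   boolean ready = rb.isInitialized();  // false: nested block is incomplete
 */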
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.UpdateBlockForPipelineRequestProto} */ public static final class Builder extends 
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpdateBlockForPipelineRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpdateBlockForPipelineRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getBlockFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (blockBuilder_ == null) { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); } else { blockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); clientName_ = ""; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpdateBlockForPipelineRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (blockBuilder_ == null) { result.block_ = block_; } else { result.block_ = blockBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.clientName_ = 
clientName_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto.getDefaultInstance()) return this; if (other.hasBlock()) { mergeBlock(other.getBlock()); } if (other.hasClientName()) { bitField0_ |= 0x00000002; clientName_ = other.clientName_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasBlock()) { return false; } if (!hasClientName()) { return false; } if (!getBlock().isInitialized()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.ExtendedBlockProto block = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_; /** * required .hadoop.hdfs.ExtendedBlockProto block = 1; */ public boolean hasBlock() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.ExtendedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() { if (blockBuilder_ == null) { return block_; } else { return blockBuilder_.getMessage(); } } /** * required .hadoop.hdfs.ExtendedBlockProto block = 1; */ public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { if (blockBuilder_ == null) { if (value == null) { throw new NullPointerException(); } block_ = value; onChanged(); } else { blockBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.ExtendedBlockProto block = 1; */ public Builder setBlock( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) { if (blockBuilder_ == null) { block_ = builderForValue.build(); onChanged(); } else { blockBuilder_.setMessage(builderForValue.build()); } 
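/*
 * Note on the nested-field API (sketch): setBlock(...) replaces the submessage
 * wholesale, mergeBlock(...) merges field-by-field into any existing value,
 * and getBlockBuilder() (below) exposes a child builder backed by a
 * SingleFieldBuilder so the submessage can be edited in place:
 *
 *   UpdateBlockForPipelineRequestProto.Builder rb2 = req2.toBuilder(); // req2 from the sketch above
 *   rb2.getBlockBuilder().setGenerationStamp(1002L);  // edit nested block in place
 *   UpdateBlockForPipelineRequestProto bumped = rb2.build();
 */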
bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.ExtendedBlockProto block = 1; */ public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { if (blockBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); } else { block_ = value; } onChanged(); } else { blockBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.ExtendedBlockProto block = 1; */ public Builder clearBlock() { if (blockBuilder_ == null) { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); onChanged(); } else { blockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.ExtendedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() { bitField0_ |= 0x00000001; onChanged(); return getBlockFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.ExtendedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() { if (blockBuilder_ != null) { return blockBuilder_.getMessageOrBuilder(); } else { return block_; } } /** * required .hadoop.hdfs.ExtendedBlockProto block = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> getBlockFieldBuilder() { if (blockBuilder_ == null) { blockBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>( block_, getParentForChildren(), isClean()); block_ = null; } return blockBuilder_; } // required string clientName = 2; private java.lang.Object clientName_ = ""; /** * required string clientName = 2; */ public boolean hasClientName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string clientName = 2; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); clientName_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string clientName = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string clientName = 2; */ public Builder setClientName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; clientName_ = value; onChanged(); return this; } /** * 
required string clientName = 2; */ public Builder clearClientName() { bitField0_ = (bitField0_ & ~0x00000002); clientName_ = getDefaultInstance().getClientName(); onChanged(); return this; } /** * required string clientName = 2; */ public Builder setClientNameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; clientName_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.UpdateBlockForPipelineRequestProto) } static { defaultInstance = new UpdateBlockForPipelineRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.UpdateBlockForPipelineRequestProto) } public interface UpdateBlockForPipelineResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.LocatedBlockProto block = 1; /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ boolean hasBlock(); /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock(); /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.UpdateBlockForPipelineResponseProto} */ public static final class UpdateBlockForPipelineResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements UpdateBlockForPipelineResponseProtoOrBuilder { // Use UpdateBlockForPipelineResponseProto.newBuilder() to construct. private UpdateBlockForPipelineResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private UpdateBlockForPipelineResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final UpdateBlockForPipelineResponseProto defaultInstance; public static UpdateBlockForPipelineResponseProto getDefaultInstance() { return defaultInstance; } public UpdateBlockForPipelineResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private UpdateBlockForPipelineResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = block_.toBuilder(); } block_ = 
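/*
 * Context note: this request/response pair backs ClientProtocol's
 * updateBlockForPipeline(...). During write-pipeline recovery an HDFS client
 * sends the current block plus its client name, and the NameNode replies with
 * a LocatedBlockProto carrying a new generation stamp (and access token) for
 * the rebuilt pipeline. A hypothetical invocation through a blocking RPC stub
 * (stub and controller names are assumptions, not part of this file):
 *
 *   UpdateBlockForPipelineResponseProto resp2 =
 *       namenodeStub.updateBlockForPipeline(controller, req2);
 *   org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto nb =
 *       resp2.getBlock();
 */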
input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(block_); block_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpdateBlockForPipelineResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpdateBlockForPipelineResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public UpdateBlockForPipelineResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new UpdateBlockForPipelineResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.LocatedBlockProto block = 1; public static final int BLOCK_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto block_; /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public boolean hasBlock() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock() { return block_; } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder() { return block_; } private void initFields() { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasBlock()) { memoizedIsInitialized = 0; return false; } if (!getBlock().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, block_); } 
getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, block_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto) obj; boolean result = true; result = result && (hasBlock() == other.hasBlock()); if (hasBlock()) { result = result && getBlock() .equals(other.getBlock()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasBlock()) { hash = (37 * hash) + BLOCK_FIELD_NUMBER; hash = (53 * hash) + getBlock().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto parseFrom( java.io.InputStream input, 
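/*
 * Note (sketch): getSerializedSize() above memoizes its result, and unknown
 * fields read off the wire are kept and re-emitted by writeTo(), so
 * re-serializing a parsed message preserves fields added by newer revisions
 * of the .proto:
 *
 *   UpdateBlockForPipelineResponseProto r =
 *       UpdateBlockForPipelineResponseProto.parseFrom(bytesFromNewerPeer); // name assumed
 *   byte[] echoed = r.toByteArray();  // unknown fields round-trip intact
 */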
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.UpdateBlockForPipelineResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpdateBlockForPipelineResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpdateBlockForPipelineResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if 
(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getBlockFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (blockBuilder_ == null) { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); } else { blockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpdateBlockForPipelineResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (blockBuilder_ == null) { result.block_ = block_; } else { result.block_ = blockBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto.getDefaultInstance()) return this; if (other.hasBlock()) { mergeBlock(other.getBlock()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasBlock()) { return false; } if (!getBlock().isInitialized()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = 
(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.LocatedBlockProto block = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blockBuilder_; /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public boolean hasBlock() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock() { if (blockBuilder_ == null) { return block_; } else { return blockBuilder_.getMessage(); } } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (blockBuilder_ == null) { if (value == null) { throw new NullPointerException(); } block_ = value; onChanged(); } else { blockBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public Builder setBlock( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { if (blockBuilder_ == null) { block_ = builderForValue.build(); onChanged(); } else { blockBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { if (blockBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); } else { block_ = value; } onChanged(); } else { blockBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public Builder clearBlock() { if (blockBuilder_ == null) { block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); onChanged(); } else { blockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlockBuilder() { bitField0_ |= 0x00000001; onChanged(); return getBlockFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder() { if (blockBuilder_ != null) { return blockBuilder_.getMessageOrBuilder(); } else { return block_; } } /** * required .hadoop.hdfs.LocatedBlockProto block = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> getBlockFieldBuilder() { if (blockBuilder_ == null) { blockBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>( block_, getParentForChildren(), isClean()); block_ = null; } return blockBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.UpdateBlockForPipelineResponseProto) } static { defaultInstance = new UpdateBlockForPipelineResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.UpdateBlockForPipelineResponseProto) } public interface UpdatePipelineRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string clientName = 1; /** * required string clientName = 1; */ boolean hasClientName(); /** * required string clientName = 1; */ java.lang.String getClientName(); /** * required string clientName = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes(); // required .hadoop.hdfs.ExtendedBlockProto oldBlock = 2; /** * required .hadoop.hdfs.ExtendedBlockProto oldBlock = 2; */ boolean hasOldBlock(); /** * required .hadoop.hdfs.ExtendedBlockProto oldBlock = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getOldBlock(); /** * required .hadoop.hdfs.ExtendedBlockProto oldBlock = 2; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getOldBlockOrBuilder(); // required .hadoop.hdfs.ExtendedBlockProto newBlock = 3; /** * required .hadoop.hdfs.ExtendedBlockProto newBlock = 3; */ boolean hasNewBlock(); /** * required .hadoop.hdfs.ExtendedBlockProto newBlock = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getNewBlock(); /** * required .hadoop.hdfs.ExtendedBlockProto newBlock = 3; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getNewBlockOrBuilder(); // repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ java.util.List getNewNodesList(); /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getNewNodes(int index); /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ int getNewNodesCount(); /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ java.util.List getNewNodesOrBuilderList(); /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getNewNodesOrBuilder( int index); // repeated string storageIDs = 5; /** * repeated string storageIDs = 5; */ java.util.List getStorageIDsList(); /** * repeated string storageIDs = 5; */ int getStorageIDsCount(); /** * repeated string storageIDs = 5; */ java.lang.String getStorageIDs(int index); /** * repeated string storageIDs = 5; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getStorageIDsBytes(int index); } /** * Protobuf type {@code hadoop.hdfs.UpdatePipelineRequestProto} */ public static final class UpdatePipelineRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements UpdatePipelineRequestProtoOrBuilder { // Use 
UpdatePipelineRequestProto.newBuilder() to construct. private UpdatePipelineRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private UpdatePipelineRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final UpdatePipelineRequestProto defaultInstance; public static UpdatePipelineRequestProto getDefaultInstance() { return defaultInstance; } public UpdatePipelineRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private UpdatePipelineRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; clientName_ = input.readBytes(); break; } case 18: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = oldBlock_.toBuilder(); } oldBlock_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(oldBlock_); oldBlock_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000002; break; } case 26: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = null; if (((bitField0_ & 0x00000004) == 0x00000004)) { subBuilder = newBlock_.toBuilder(); } newBlock_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(newBlock_); newBlock_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000004; break; } case 34: { if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { newNodes_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000008; } newNodes_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.PARSER, extensionRegistry)); break; } case 42: { if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) { storageIDs_ = new io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00000010; } storageIDs_.add(input.readBytes()); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { newNodes_ = 
java.util.Collections.unmodifiableList(newNodes_); } if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) { storageIDs_ = new io.prestosql.hadoop.$internal.com.google.protobuf.UnmodifiableLazyStringList(storageIDs_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpdatePipelineRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpdatePipelineRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public UpdatePipelineRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new UpdatePipelineRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string clientName = 1; public static final int CLIENTNAME_FIELD_NUMBER = 1; private java.lang.Object clientName_; /** * required string clientName = 1; */ public boolean hasClientName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string clientName = 1; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { clientName_ = s; } return s; } } /** * required string clientName = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required .hadoop.hdfs.ExtendedBlockProto oldBlock = 2; public static final int OLDBLOCK_FIELD_NUMBER = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto oldBlock_; /** * required .hadoop.hdfs.ExtendedBlockProto oldBlock = 2; */ public boolean hasOldBlock() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.ExtendedBlockProto oldBlock = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getOldBlock() { return oldBlock_; } /** * required .hadoop.hdfs.ExtendedBlockProto oldBlock = 2; */ public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getOldBlockOrBuilder() { return oldBlock_; } // required .hadoop.hdfs.ExtendedBlockProto newBlock = 3; public static final int NEWBLOCK_FIELD_NUMBER = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto newBlock_; /** * required .hadoop.hdfs.ExtendedBlockProto newBlock = 3; */ public boolean hasNewBlock() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required .hadoop.hdfs.ExtendedBlockProto newBlock = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getNewBlock() { return newBlock_; } /** * required .hadoop.hdfs.ExtendedBlockProto newBlock = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getNewBlockOrBuilder() { return newBlock_; } // repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; public static final int NEWNODES_FIELD_NUMBER = 4; private java.util.List newNodes_; /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public java.util.List getNewNodesList() { return newNodes_; } /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public java.util.List getNewNodesOrBuilderList() { return newNodes_; } /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public int getNewNodesCount() { return newNodes_.size(); } /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getNewNodes(int index) { return newNodes_.get(index); } /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getNewNodesOrBuilder( int index) { return newNodes_.get(index); } // repeated string storageIDs = 5; public static final int STORAGEIDS_FIELD_NUMBER = 5; private io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringList storageIDs_; /** * repeated string storageIDs = 5; */ public java.util.List getStorageIDsList() { return storageIDs_; } /** * repeated string storageIDs = 5; */ public int getStorageIDsCount() { return storageIDs_.size(); } /** * repeated string storageIDs = 5; */ public java.lang.String getStorageIDs(int index) { return storageIDs_.get(index); } /** * repeated string storageIDs = 5; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getStorageIDsBytes(int index) { return storageIDs_.getByteString(index); } private void initFields() { clientName_ = ""; oldBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); newBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); newNodes_ = java.util.Collections.emptyList(); storageIDs_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasClientName()) { memoizedIsInitialized = 0; return false; } if (!hasOldBlock()) { memoizedIsInitialized = 0; return false; } if (!hasNewBlock()) { memoizedIsInitialized = 0; return false; } if (!getOldBlock().isInitialized()) { memoizedIsInitialized = 0; return false; } if (!getNewBlock().isInitialized()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getNewNodesCount(); i++) { if (!getNewNodes(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void 
writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getClientNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, oldBlock_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeMessage(3, newBlock_); } for (int i = 0; i < newNodes_.size(); i++) { output.writeMessage(4, newNodes_.get(i)); } for (int i = 0; i < storageIDs_.size(); i++) { output.writeBytes(5, storageIDs_.getByteString(i)); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getClientNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(2, oldBlock_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(3, newBlock_); } for (int i = 0; i < newNodes_.size(); i++) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(4, newNodes_.get(i)); } { int dataSize = 0; for (int i = 0; i < storageIDs_.size(); i++) { dataSize += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSizeNoTag(storageIDs_.getByteString(i)); } size += dataSize; size += 1 * getStorageIDsList().size(); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto) obj; boolean result = true; result = result && (hasClientName() == other.hasClientName()); if (hasClientName()) { result = result && getClientName() .equals(other.getClientName()); } result = result && (hasOldBlock() == other.hasOldBlock()); if (hasOldBlock()) { result = result && getOldBlock() .equals(other.getOldBlock()); } result = result && (hasNewBlock() == other.hasNewBlock()); if (hasNewBlock()) { result = result && getNewBlock() .equals(other.getNewBlock()); } result = result && getNewNodesList() .equals(other.getNewNodesList()); result = result && getStorageIDsList() .equals(other.getStorageIDsList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasClientName()) { hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER; hash = (53 * hash) + getClientName().hashCode(); } if (hasOldBlock()) { hash = (37 * hash) + OLDBLOCK_FIELD_NUMBER; hash = (53 * hash) + 
getOldBlock().hashCode(); } if (hasNewBlock()) { hash = (37 * hash) + NEWBLOCK_FIELD_NUMBER; hash = (53 * hash) + getNewBlock().hashCode(); } if (getNewNodesCount() > 0) { hash = (37 * hash) + NEWNODES_FIELD_NUMBER; hash = (53 * hash) + getNewNodesList().hashCode(); } if (getStorageIDsCount() > 0) { hash = (37 * hash) + STORAGEIDS_FIELD_NUMBER; hash = (53 * hash) + getStorageIDsList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } 
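  // Illustrative sketch (editorial addition, not emitted by protoc): a minimal
  // build/serialize/parse round trip for UpdatePipelineRequestProto. The client
  // name and the two block arguments are hypothetical placeholders.
  private static UpdatePipelineRequestProto exampleRoundTrip(
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto oldBlock,
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto newBlock)
      throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
    UpdatePipelineRequestProto req = newBuilder()
        .setClientName("DFSClient_example")  // required string clientName = 1
        .setOldBlock(oldBlock)               // required ExtendedBlockProto oldBlock = 2
        .setNewBlock(newBlock)               // required ExtendedBlockProto newBlock = 3
        .build();                            // throws if any required field is unset
    byte[] wire = req.toByteArray();         // standard protobuf binary encoding
    return parseFrom(wire);                  // delegates to PARSER defined above
  }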
public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.UpdatePipelineRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpdatePipelineRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpdatePipelineRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getOldBlockFieldBuilder(); getNewBlockFieldBuilder(); getNewNodesFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); clientName_ = ""; bitField0_ = (bitField0_ & ~0x00000001); if (oldBlockBuilder_ == null) { oldBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); } else { oldBlockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); if (newBlockBuilder_ == null) { newBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); } else { newBlockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); if (newNodesBuilder_ == null) { newNodes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000008); } else { newNodesBuilder_.clear(); } storageIDs_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000010); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpdatePipelineRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto 
getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.clientName_ = clientName_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } if (oldBlockBuilder_ == null) { result.oldBlock_ = oldBlock_; } else { result.oldBlock_ = oldBlockBuilder_.build(); } if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } if (newBlockBuilder_ == null) { result.newBlock_ = newBlock_; } else { result.newBlock_ = newBlockBuilder_.build(); } if (newNodesBuilder_ == null) { if (((bitField0_ & 0x00000008) == 0x00000008)) { newNodes_ = java.util.Collections.unmodifiableList(newNodes_); bitField0_ = (bitField0_ & ~0x00000008); } result.newNodes_ = newNodes_; } else { result.newNodes_ = newNodesBuilder_.build(); } if (((bitField0_ & 0x00000010) == 0x00000010)) { storageIDs_ = new io.prestosql.hadoop.$internal.com.google.protobuf.UnmodifiableLazyStringList( storageIDs_); bitField0_ = (bitField0_ & ~0x00000010); } result.storageIDs_ = storageIDs_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto.getDefaultInstance()) return this; if (other.hasClientName()) { bitField0_ |= 0x00000001; clientName_ = other.clientName_; onChanged(); } if (other.hasOldBlock()) { mergeOldBlock(other.getOldBlock()); } if (other.hasNewBlock()) { mergeNewBlock(other.getNewBlock()); } if (newNodesBuilder_ == null) { if (!other.newNodes_.isEmpty()) { if (newNodes_.isEmpty()) { newNodes_ = other.newNodes_; bitField0_ = (bitField0_ & ~0x00000008); } else { ensureNewNodesIsMutable(); newNodes_.addAll(other.newNodes_); } onChanged(); } } else { if (!other.newNodes_.isEmpty()) { if (newNodesBuilder_.isEmpty()) { newNodesBuilder_.dispose(); newNodesBuilder_ = null; newNodes_ = other.newNodes_; bitField0_ = (bitField0_ & ~0x00000008); newNodesBuilder_ = io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
getNewNodesFieldBuilder() : null; } else { newNodesBuilder_.addAllMessages(other.newNodes_); } } } if (!other.storageIDs_.isEmpty()) { if (storageIDs_.isEmpty()) { storageIDs_ = other.storageIDs_; bitField0_ = (bitField0_ & ~0x00000010); } else { ensureStorageIDsIsMutable(); storageIDs_.addAll(other.storageIDs_); } onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasClientName()) { return false; } if (!hasOldBlock()) { return false; } if (!hasNewBlock()) { return false; } if (!getOldBlock().isInitialized()) { return false; } if (!getNewBlock().isInitialized()) { return false; } for (int i = 0; i < getNewNodesCount(); i++) { if (!getNewNodes(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string clientName = 1; private java.lang.Object clientName_ = ""; /** * required string clientName = 1; */ public boolean hasClientName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string clientName = 1; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); clientName_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string clientName = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string clientName = 1; */ public Builder setClientName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; clientName_ = value; onChanged(); return this; } /** * required string clientName = 1; */ public Builder clearClientName() { bitField0_ = (bitField0_ & ~0x00000001); clientName_ = getDefaultInstance().getClientName(); onChanged(); return this; } /** * required string clientName = 1; */ public Builder setClientNameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; clientName_ = value; onChanged(); return this; } // required .hadoop.hdfs.ExtendedBlockProto oldBlock = 2; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto oldBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); private 
io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> oldBlockBuilder_; /** * required .hadoop.hdfs.ExtendedBlockProto oldBlock = 2; */ public boolean hasOldBlock() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.ExtendedBlockProto oldBlock = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getOldBlock() { if (oldBlockBuilder_ == null) { return oldBlock_; } else { return oldBlockBuilder_.getMessage(); } } /** * required .hadoop.hdfs.ExtendedBlockProto oldBlock = 2; */ public Builder setOldBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { if (oldBlockBuilder_ == null) { if (value == null) { throw new NullPointerException(); } oldBlock_ = value; onChanged(); } else { oldBlockBuilder_.setMessage(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.ExtendedBlockProto oldBlock = 2; */ public Builder setOldBlock( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) { if (oldBlockBuilder_ == null) { oldBlock_ = builderForValue.build(); onChanged(); } else { oldBlockBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.ExtendedBlockProto oldBlock = 2; */ public Builder mergeOldBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { if (oldBlockBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002) && oldBlock_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) { oldBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(oldBlock_).mergeFrom(value).buildPartial(); } else { oldBlock_ = value; } onChanged(); } else { oldBlockBuilder_.mergeFrom(value); } bitField0_ |= 0x00000002; return this; } /** * required .hadoop.hdfs.ExtendedBlockProto oldBlock = 2; */ public Builder clearOldBlock() { if (oldBlockBuilder_ == null) { oldBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); onChanged(); } else { oldBlockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } /** * required .hadoop.hdfs.ExtendedBlockProto oldBlock = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getOldBlockBuilder() { bitField0_ |= 0x00000002; onChanged(); return getOldBlockFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.ExtendedBlockProto oldBlock = 2; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getOldBlockOrBuilder() { if (oldBlockBuilder_ != null) { return oldBlockBuilder_.getMessageOrBuilder(); } else { return oldBlock_; } } /** * required .hadoop.hdfs.ExtendedBlockProto oldBlock = 2; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> getOldBlockFieldBuilder() { if (oldBlockBuilder_ == null) { oldBlockBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>( oldBlock_, getParentForChildren(), isClean()); oldBlock_ = null; } return oldBlockBuilder_; } // required .hadoop.hdfs.ExtendedBlockProto newBlock = 3; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto newBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> newBlockBuilder_; /** * required .hadoop.hdfs.ExtendedBlockProto newBlock = 3; */ public boolean hasNewBlock() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required .hadoop.hdfs.ExtendedBlockProto newBlock = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getNewBlock() { if (newBlockBuilder_ == null) { return newBlock_; } else { return newBlockBuilder_.getMessage(); } } /** * required .hadoop.hdfs.ExtendedBlockProto newBlock = 3; */ public Builder setNewBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { if (newBlockBuilder_ == null) { if (value == null) { throw new NullPointerException(); } newBlock_ = value; onChanged(); } else { newBlockBuilder_.setMessage(value); } bitField0_ |= 0x00000004; return this; } /** * required .hadoop.hdfs.ExtendedBlockProto newBlock = 3; */ public Builder setNewBlock( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) { if (newBlockBuilder_ == null) { newBlock_ = builderForValue.build(); onChanged(); } else { newBlockBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000004; return this; } /** * required .hadoop.hdfs.ExtendedBlockProto newBlock = 3; */ public Builder mergeNewBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { if (newBlockBuilder_ == null) { if (((bitField0_ & 0x00000004) == 0x00000004) && newBlock_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) { newBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(newBlock_).mergeFrom(value).buildPartial(); } else { newBlock_ = value; } onChanged(); } else { newBlockBuilder_.mergeFrom(value); } bitField0_ |= 0x00000004; return this; } /** * required .hadoop.hdfs.ExtendedBlockProto newBlock = 3; */ public Builder clearNewBlock() { if (newBlockBuilder_ == null) { newBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); onChanged(); } else { newBlockBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); return this; } /** * required .hadoop.hdfs.ExtendedBlockProto newBlock = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getNewBlockBuilder() { bitField0_ |= 0x00000004; onChanged(); return getNewBlockFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.ExtendedBlockProto newBlock = 3; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getNewBlockOrBuilder() { if (newBlockBuilder_ != null) { return newBlockBuilder_.getMessageOrBuilder(); } else { return newBlock_; } } /** * required .hadoop.hdfs.ExtendedBlockProto newBlock = 3; */ private 
io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> getNewBlockFieldBuilder() { if (newBlockBuilder_ == null) { newBlockBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>( newBlock_, getParentForChildren(), isClean()); newBlock_ = null; } return newBlockBuilder_; } // repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; private java.util.List newNodes_ = java.util.Collections.emptyList(); private void ensureNewNodesIsMutable() { if (!((bitField0_ & 0x00000008) == 0x00000008)) { newNodes_ = new java.util.ArrayList(newNodes_); bitField0_ |= 0x00000008; } } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> newNodesBuilder_; /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public java.util.List getNewNodesList() { if (newNodesBuilder_ == null) { return java.util.Collections.unmodifiableList(newNodes_); } else { return newNodesBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public int getNewNodesCount() { if (newNodesBuilder_ == null) { return newNodes_.size(); } else { return newNodesBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getNewNodes(int index) { if (newNodesBuilder_ == null) { return newNodes_.get(index); } else { return newNodesBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public Builder setNewNodes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { if (newNodesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureNewNodesIsMutable(); newNodes_.set(index, value); onChanged(); } else { newNodesBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public Builder setNewNodes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) { if (newNodesBuilder_ == null) { ensureNewNodesIsMutable(); newNodes_.set(index, builderForValue.build()); onChanged(); } else { newNodesBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public Builder addNewNodes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { if (newNodesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureNewNodesIsMutable(); newNodes_.add(value); onChanged(); } else { newNodesBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public Builder addNewNodes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { if (newNodesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } 
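// Editorial note: ensureNewNodesIsMutable() copies the backing list on first
// mutation, so a list aliased from another message during mergeFrom() is never
// modified in place (copy-on-write).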
ensureNewNodesIsMutable(); newNodes_.add(index, value); onChanged(); } else { newNodesBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public Builder addNewNodes( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) { if (newNodesBuilder_ == null) { ensureNewNodesIsMutable(); newNodes_.add(builderForValue.build()); onChanged(); } else { newNodesBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public Builder addNewNodes( int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) { if (newNodesBuilder_ == null) { ensureNewNodesIsMutable(); newNodes_.add(index, builderForValue.build()); onChanged(); } else { newNodesBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public Builder addAllNewNodes( java.lang.Iterable values) { if (newNodesBuilder_ == null) { ensureNewNodesIsMutable(); super.addAll(values, newNodes_); onChanged(); } else { newNodesBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public Builder clearNewNodes() { if (newNodesBuilder_ == null) { newNodes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000008); onChanged(); } else { newNodesBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public Builder removeNewNodes(int index) { if (newNodesBuilder_ == null) { ensureNewNodesIsMutable(); newNodes_.remove(index); onChanged(); } else { newNodesBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getNewNodesBuilder( int index) { return getNewNodesFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getNewNodesOrBuilder( int index) { if (newNodesBuilder_ == null) { return newNodes_.get(index); } else { return newNodesBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public java.util.List getNewNodesOrBuilderList() { if (newNodesBuilder_ != null) { return newNodesBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(newNodes_); } } /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder addNewNodesBuilder() { return getNewNodesFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder addNewNodesBuilder( int index) { return getNewNodesFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.DatanodeIDProto newNodes = 4; */ public java.util.List getNewNodesBuilderList() { return getNewNodesFieldBuilder().getBuilderList(); } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> getNewNodesFieldBuilder() { if (newNodesBuilder_ == null) { newNodesBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>( newNodes_, ((bitField0_ & 0x00000008) == 0x00000008), getParentForChildren(), isClean()); newNodes_ = null; } return newNodesBuilder_; } // repeated string storageIDs = 5; private io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringList storageIDs_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY; private void ensureStorageIDsIsMutable() { if (!((bitField0_ & 0x00000010) == 0x00000010)) { storageIDs_ = new io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList(storageIDs_); bitField0_ |= 0x00000010; } } /** * repeated string storageIDs = 5; */ public java.util.List getStorageIDsList() { return java.util.Collections.unmodifiableList(storageIDs_); } /** * repeated string storageIDs = 5; */ public int getStorageIDsCount() { return storageIDs_.size(); } /** * repeated string storageIDs = 5; */ public java.lang.String getStorageIDs(int index) { return storageIDs_.get(index); } /** * repeated string storageIDs = 5; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getStorageIDsBytes(int index) { return storageIDs_.getByteString(index); } /** * repeated string storageIDs = 5; */ public Builder setStorageIDs( int index, java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureStorageIDsIsMutable(); storageIDs_.set(index, value); onChanged(); return this; } /** * repeated string storageIDs = 5; */ public Builder addStorageIDs( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureStorageIDsIsMutable(); storageIDs_.add(value); onChanged(); return this; } /** * repeated string storageIDs = 5; */ public Builder addAllStorageIDs( java.lang.Iterable values) { ensureStorageIDsIsMutable(); super.addAll(values, storageIDs_); onChanged(); return this; } /** * repeated string storageIDs = 5; */ public Builder clearStorageIDs() { storageIDs_ = io.prestosql.hadoop.$internal.com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000010); onChanged(); return this; } /** * repeated string storageIDs = 5; */ public Builder addStorageIDsBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } ensureStorageIDsIsMutable(); storageIDs_.add(value); onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.UpdatePipelineRequestProto) } static { defaultInstance = new UpdatePipelineRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.UpdatePipelineRequestProto) } public interface UpdatePipelineResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.UpdatePipelineResponseProto} * *
   * <pre>
   * void response
   * </pre>
*/ public static final class UpdatePipelineResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements UpdatePipelineResponseProtoOrBuilder { // Use UpdatePipelineResponseProto.newBuilder() to construct. private UpdatePipelineResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private UpdatePipelineResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final UpdatePipelineResponseProto defaultInstance; public static UpdatePipelineResponseProto getDefaultInstance() { return defaultInstance; } public UpdatePipelineResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private UpdatePipelineResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpdatePipelineResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpdatePipelineResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public UpdatePipelineResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new UpdatePipelineResponseProto(input, extensionRegistry); } }; @java.lang.Override public 
io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto parseFrom( java.io.InputStream input, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.UpdatePipelineResponseProto} * *
     * <pre>
     * void response
     * </pre>
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpdatePipelineResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpdatePipelineResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_UpdatePipelineResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.getDefaultInstance()) return this; 
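// --- Editor's note (illustrative sketch, not part of the generated source) ---
// UpdatePipelineResponseProto declares no fields (a "void response"), so
// mergeFrom() only has unknown fields to carry over, and serialization
// produces an empty payload. A round trip:
//
//   UpdatePipelineResponseProto resp = UpdatePipelineResponseProto.getDefaultInstance();
//   byte[] wire = resp.toByteArray();   // zero bytes for the default instance
//   UpdatePipelineResponseProto back = UpdatePipelineResponseProto.parseFrom(wire);
//   resp.equals(back);                  // true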
this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.UpdatePipelineResponseProto) } static { defaultInstance = new UpdatePipelineResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.UpdatePipelineResponseProto) } public interface SetBalancerBandwidthRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required int64 bandwidth = 1; /** * required int64 bandwidth = 1; */ boolean hasBandwidth(); /** * required int64 bandwidth = 1; */ long getBandwidth(); } /** * Protobuf type {@code hadoop.hdfs.SetBalancerBandwidthRequestProto} */ public static final class SetBalancerBandwidthRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements SetBalancerBandwidthRequestProtoOrBuilder { // Use SetBalancerBandwidthRequestProto.newBuilder() to construct. private SetBalancerBandwidthRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SetBalancerBandwidthRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SetBalancerBandwidthRequestProto defaultInstance; public static SetBalancerBandwidthRequestProto getDefaultInstance() { return defaultInstance; } public SetBalancerBandwidthRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SetBalancerBandwidthRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; bandwidth_ = input.readInt64(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw 
e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetBalancerBandwidthRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetBalancerBandwidthRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public SetBalancerBandwidthRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new SetBalancerBandwidthRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required int64 bandwidth = 1; public static final int BANDWIDTH_FIELD_NUMBER = 1; private long bandwidth_; /** * required int64 bandwidth = 1; */ public boolean hasBandwidth() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 bandwidth = 1; */ public long getBandwidth() { return bandwidth_; } private void initFields() { bandwidth_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasBandwidth()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeInt64(1, bandwidth_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeInt64Size(1, bandwidth_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto)) { return super.equals(obj); } 
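// --- Editor's note (illustrative sketch, not part of the generated source) ---
// equals() and hashCode() here are value-based: two requests agree exactly
// when their `bandwidth` presence bits and values agree.
//
//   SetBalancerBandwidthRequestProto a =
//       SetBalancerBandwidthRequestProto.newBuilder().setBandwidth(42L).build();
//   SetBalancerBandwidthRequestProto b =
//       SetBalancerBandwidthRequestProto.newBuilder().setBandwidth(42L).build();
//   a.equals(b) && a.hashCode() == b.hashCode();  // true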
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto) obj; boolean result = true; result = result && (hasBandwidth() == other.hasBandwidth()); if (hasBandwidth()) { result = result && (getBandwidth() == other.getBandwidth()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasBandwidth()) { hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER; hash = (53 * hash) + hashLong(getBandwidth()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto parseFrom( 
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SetBalancerBandwidthRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetBalancerBandwidthRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetBalancerBandwidthRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); bandwidth_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetBalancerBandwidthRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto.getDefaultInstance(); } public 
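// --- Editor's note (illustrative sketch, not part of the generated source) ---
// `bandwidth` is a required proto2 field, so build() below throws an
// UninitializedMessageException while it is unset; buildPartial() does not.
//
//   SetBalancerBandwidthRequestProto.Builder b =
//       SetBalancerBandwidthRequestProto.newBuilder();
//   b.isInitialized();         // false: bandwidth not set yet
//   b.setBandwidth(1L << 20);  // bytes/sec; hypothetical value
//   SetBalancerBandwidthRequestProto req = b.build();  // now succeeds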
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.bandwidth_ = bandwidth_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto.getDefaultInstance()) return this; if (other.hasBandwidth()) { setBandwidth(other.getBandwidth()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasBandwidth()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required int64 bandwidth = 1; private long bandwidth_ ; /** * required int64 bandwidth = 1; */ public boolean hasBandwidth() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 bandwidth = 1; */ public long getBandwidth() { return bandwidth_; } /** * required int64 bandwidth = 1; */ public Builder setBandwidth(long value) { bitField0_ |= 0x00000001; bandwidth_ = value; onChanged(); return this; } /** * required int64 bandwidth = 1; */ public Builder clearBandwidth() { bitField0_ = (bitField0_ & ~0x00000001); bandwidth_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SetBalancerBandwidthRequestProto) } static { defaultInstance = new SetBalancerBandwidthRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SetBalancerBandwidthRequestProto) } public interface SetBalancerBandwidthResponseProtoOrBuilder extends 
io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.SetBalancerBandwidthResponseProto} * *
   * <pre>
   * void response
   * </pre>
*/ public static final class SetBalancerBandwidthResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements SetBalancerBandwidthResponseProtoOrBuilder { // Use SetBalancerBandwidthResponseProto.newBuilder() to construct. private SetBalancerBandwidthResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SetBalancerBandwidthResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SetBalancerBandwidthResponseProto defaultInstance; public static SetBalancerBandwidthResponseProto getDefaultInstance() { return defaultInstance; } public SetBalancerBandwidthResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SetBalancerBandwidthResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetBalancerBandwidthResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetBalancerBandwidthResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public SetBalancerBandwidthResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new 
SetBalancerBandwidthResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static 
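// --- Editor's note (illustrative sketch, not part of the generated source) ---
// Two stream-parsing flavors are generated: parseFrom(InputStream) reads the
// stream to EOF as one message, while parseDelimitedFrom(InputStream) first
// reads a varint length prefix, so several messages can share one stream.
// Sketch, with `in` a hypothetical InputStream of delimited messages:
//
//   SetBalancerBandwidthResponseProto r =
//       SetBalancerBandwidthResponseProto.parseDelimitedFrom(in);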
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SetBalancerBandwidthResponseProto} * *
     * <pre>
     * void response
     * </pre>
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetBalancerBandwidthResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetBalancerBandwidthResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SetBalancerBandwidthResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto other) { if (other == 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SetBalancerBandwidthResponseProto) } static { defaultInstance = new SetBalancerBandwidthResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SetBalancerBandwidthResponseProto) } public interface GetDataEncryptionKeyRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.GetDataEncryptionKeyRequestProto} * *
   * <pre>
   * no parameters
   * </pre>
*/ public static final class GetDataEncryptionKeyRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetDataEncryptionKeyRequestProtoOrBuilder { // Use GetDataEncryptionKeyRequestProto.newBuilder() to construct. private GetDataEncryptionKeyRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetDataEncryptionKeyRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetDataEncryptionKeyRequestProto defaultInstance; public static GetDataEncryptionKeyRequestProto getDefaultInstance() { return defaultInstance; } public GetDataEncryptionKeyRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetDataEncryptionKeyRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDataEncryptionKeyRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDataEncryptionKeyRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetDataEncryptionKeyRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetDataEncryptionKeyRequestProto(input, 
extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static 
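// --- Editor's note (illustrative sketch, not part of the generated source) ---
// GetDataEncryptionKeyRequestProto carries no parameters, so a caller does
// not need a builder; the shared default instance serves as the request:
//
//   GetDataEncryptionKeyRequestProto req =
//       GetDataEncryptionKeyRequestProto.getDefaultInstance();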
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetDataEncryptionKeyRequestProto} * *
     * <pre>
     * no parameters
     * </pre>
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDataEncryptionKeyRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDataEncryptionKeyRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDataEncryptionKeyRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto other) { if (other == 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetDataEncryptionKeyRequestProto) } static { defaultInstance = new GetDataEncryptionKeyRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetDataEncryptionKeyRequestProto) } public interface GetDataEncryptionKeyResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // optional .hadoop.hdfs.DataEncryptionKeyProto dataEncryptionKey = 1; /** * optional .hadoop.hdfs.DataEncryptionKeyProto dataEncryptionKey = 1; */ boolean hasDataEncryptionKey(); /** * optional .hadoop.hdfs.DataEncryptionKeyProto dataEncryptionKey = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto getDataEncryptionKey(); /** * optional .hadoop.hdfs.DataEncryptionKeyProto dataEncryptionKey = 1; */ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProtoOrBuilder getDataEncryptionKeyOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.GetDataEncryptionKeyResponseProto} */ public static final class GetDataEncryptionKeyResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetDataEncryptionKeyResponseProtoOrBuilder { // Use GetDataEncryptionKeyResponseProto.newBuilder() to construct. 
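/*
 * Illustrative usage sketch, not part of the generated file: the request
 * message carries no fields, so fetching a data encryption key amounts to
 * writing the empty request and reading the optional key out of the response.
 * Only newBuilder()/build(), writeDelimitedTo(), parseDelimitedFrom(),
 * hasDataEncryptionKey() and getDataEncryptionKey() come from this file and
 * HdfsProtos; 'out' and 'in' are hypothetical java.io streams standing in for
 * a real transport.
 *
 *   GetDataEncryptionKeyRequestProto req =
 *       GetDataEncryptionKeyRequestProto.newBuilder().build(); // no fields to set
 *   req.writeDelimitedTo(out); // length-prefixed frame, pairs with parseDelimitedFrom
 *
 *   GetDataEncryptionKeyResponseProto resp =
 *       GetDataEncryptionKeyResponseProto.parseDelimitedFrom(in);
 *   if (resp.hasDataEncryptionKey()) { // optional field: check presence before reading
 *     HdfsProtos.DataEncryptionKeyProto key = resp.getDataEncryptionKey();
 *   }
 */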
private GetDataEncryptionKeyResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetDataEncryptionKeyResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetDataEncryptionKeyResponseProto defaultInstance; public static GetDataEncryptionKeyResponseProto getDefaultInstance() { return defaultInstance; } public GetDataEncryptionKeyResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetDataEncryptionKeyResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = dataEncryptionKey_.toBuilder(); } dataEncryptionKey_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(dataEncryptionKey_); dataEncryptionKey_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDataEncryptionKeyResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDataEncryptionKeyResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetDataEncryptionKeyResponseProto parsePartialFrom( 
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetDataEncryptionKeyResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // optional .hadoop.hdfs.DataEncryptionKeyProto dataEncryptionKey = 1; public static final int DATAENCRYPTIONKEY_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto dataEncryptionKey_; /** * optional .hadoop.hdfs.DataEncryptionKeyProto dataEncryptionKey = 1; */ public boolean hasDataEncryptionKey() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .hadoop.hdfs.DataEncryptionKeyProto dataEncryptionKey = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto getDataEncryptionKey() { return dataEncryptionKey_; } /** * optional .hadoop.hdfs.DataEncryptionKeyProto dataEncryptionKey = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProtoOrBuilder getDataEncryptionKeyOrBuilder() { return dataEncryptionKey_; } private void initFields() { dataEncryptionKey_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (hasDataEncryptionKey()) { if (!getDataEncryptionKey().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, dataEncryptionKey_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, dataEncryptionKey_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto) obj; boolean result = true; result = result && (hasDataEncryptionKey() == other.hasDataEncryptionKey()); if (hasDataEncryptionKey()) { result = result && getDataEncryptionKey() .equals(other.getDataEncryptionKey()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 
0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasDataEncryptionKey()) { hash = (37 * hash) + DATAENCRYPTIONKEY_FIELD_NUMBER; hash = (53 * hash) + getDataEncryptionKey().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder 
newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetDataEncryptionKeyResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDataEncryptionKeyResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDataEncryptionKeyResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getDataEncryptionKeyFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (dataEncryptionKeyBuilder_ == null) { dataEncryptionKey_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.getDefaultInstance(); } else { dataEncryptionKeyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetDataEncryptionKeyResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (dataEncryptionKeyBuilder_ == null) { result.dataEncryptionKey_ = dataEncryptionKey_; } else { result.dataEncryptionKey_ = dataEncryptionKeyBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto.getDefaultInstance()) return this; if (other.hasDataEncryptionKey()) { mergeDataEncryptionKey(other.getDataEncryptionKey()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (hasDataEncryptionKey()) { if (!getDataEncryptionKey().isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // optional .hadoop.hdfs.DataEncryptionKeyProto dataEncryptionKey = 1; private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto dataEncryptionKey_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProtoOrBuilder> dataEncryptionKeyBuilder_; /** * optional .hadoop.hdfs.DataEncryptionKeyProto dataEncryptionKey = 1; */ public boolean hasDataEncryptionKey() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .hadoop.hdfs.DataEncryptionKeyProto dataEncryptionKey = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto getDataEncryptionKey() { if (dataEncryptionKeyBuilder_ == null) { return dataEncryptionKey_; } else { return dataEncryptionKeyBuilder_.getMessage(); } } 
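/*
 * Note on the setters below (illustrative sketch, not generated code):
 * setDataEncryptionKey() replaces the field outright, while
 * mergeDataEncryptionKey() folds the new value into an already-set one via
 * newBuilder(existing).mergeFrom(value).buildPartial(), exactly as the
 * generated bodies show. 'keyA' and 'keyB' are hypothetical
 * DataEncryptionKeyProto instances.
 *
 *   GetDataEncryptionKeyResponseProto.Builder b =
 *       GetDataEncryptionKeyResponseProto.newBuilder();
 *   b.setDataEncryptionKey(keyA);   // field now equals keyA
 *   b.mergeDataEncryptionKey(keyB); // keyB merged into keyA, not a replacement
 */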
/** * optional .hadoop.hdfs.DataEncryptionKeyProto dataEncryptionKey = 1; */ public Builder setDataEncryptionKey(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto value) { if (dataEncryptionKeyBuilder_ == null) { if (value == null) { throw new NullPointerException(); } dataEncryptionKey_ = value; onChanged(); } else { dataEncryptionKeyBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.DataEncryptionKeyProto dataEncryptionKey = 1; */ public Builder setDataEncryptionKey( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.Builder builderForValue) { if (dataEncryptionKeyBuilder_ == null) { dataEncryptionKey_ = builderForValue.build(); onChanged(); } else { dataEncryptionKeyBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.DataEncryptionKeyProto dataEncryptionKey = 1; */ public Builder mergeDataEncryptionKey(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto value) { if (dataEncryptionKeyBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && dataEncryptionKey_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.getDefaultInstance()) { dataEncryptionKey_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.newBuilder(dataEncryptionKey_).mergeFrom(value).buildPartial(); } else { dataEncryptionKey_ = value; } onChanged(); } else { dataEncryptionKeyBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * optional .hadoop.hdfs.DataEncryptionKeyProto dataEncryptionKey = 1; */ public Builder clearDataEncryptionKey() { if (dataEncryptionKeyBuilder_ == null) { dataEncryptionKey_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.getDefaultInstance(); onChanged(); } else { dataEncryptionKeyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * optional .hadoop.hdfs.DataEncryptionKeyProto dataEncryptionKey = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.Builder getDataEncryptionKeyBuilder() { bitField0_ |= 0x00000001; onChanged(); return getDataEncryptionKeyFieldBuilder().getBuilder(); } /** * optional .hadoop.hdfs.DataEncryptionKeyProto dataEncryptionKey = 1; */ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProtoOrBuilder getDataEncryptionKeyOrBuilder() { if (dataEncryptionKeyBuilder_ != null) { return dataEncryptionKeyBuilder_.getMessageOrBuilder(); } else { return dataEncryptionKey_; } } /** * optional .hadoop.hdfs.DataEncryptionKeyProto dataEncryptionKey = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProtoOrBuilder> getDataEncryptionKeyFieldBuilder() { if (dataEncryptionKeyBuilder_ == null) { dataEncryptionKeyBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProtoOrBuilder>( dataEncryptionKey_, getParentForChildren(), isClean()); dataEncryptionKey_ = null; } return dataEncryptionKeyBuilder_; } // 
@@protoc_insertion_point(builder_scope:hadoop.hdfs.GetDataEncryptionKeyResponseProto) } static { defaultInstance = new GetDataEncryptionKeyResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetDataEncryptionKeyResponseProto) } public interface CreateSnapshotRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string snapshotRoot = 1; /** * required string snapshotRoot = 1; */ boolean hasSnapshotRoot(); /** * required string snapshotRoot = 1; */ java.lang.String getSnapshotRoot(); /** * required string snapshotRoot = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotRootBytes(); // optional string snapshotName = 2; /** * optional string snapshotName = 2; */ boolean hasSnapshotName(); /** * optional string snapshotName = 2; */ java.lang.String getSnapshotName(); /** * optional string snapshotName = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotNameBytes(); } /** * Protobuf type {@code hadoop.hdfs.CreateSnapshotRequestProto} */ public static final class CreateSnapshotRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements CreateSnapshotRequestProtoOrBuilder { // Use CreateSnapshotRequestProto.newBuilder() to construct. private CreateSnapshotRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private CreateSnapshotRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final CreateSnapshotRequestProto defaultInstance; public static CreateSnapshotRequestProto getDefaultInstance() { return defaultInstance; } public CreateSnapshotRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CreateSnapshotRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; snapshotRoot_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; snapshotName_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final 
io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateSnapshotRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateSnapshotRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public CreateSnapshotRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new CreateSnapshotRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string snapshotRoot = 1; public static final int SNAPSHOTROOT_FIELD_NUMBER = 1; private java.lang.Object snapshotRoot_; /** * required string snapshotRoot = 1; */ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string snapshotRoot = 1; */ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotRoot_ = s; } return s; } } /** * required string snapshotRoot = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // optional string snapshotName = 2; public static final int SNAPSHOTNAME_FIELD_NUMBER = 2; private java.lang.Object snapshotName_; /** * optional string snapshotName = 2; */ public boolean hasSnapshotName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string snapshotName = 2; */ public java.lang.String getSnapshotName() { java.lang.Object ref = snapshotName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotName_ = s; } return s; } } /** * optional string snapshotName = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotNameBytes() { java.lang.Object ref = snapshotName_; if (ref instanceof java.lang.String) { 
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { snapshotRoot_ = ""; snapshotName_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSnapshotRoot()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSnapshotRootBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getSnapshotNameBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSnapshotRootBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(2, getSnapshotNameBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto) obj; boolean result = true; result = result && (hasSnapshotRoot() == other.hasSnapshotRoot()); if (hasSnapshotRoot()) { result = result && getSnapshotRoot() .equals(other.getSnapshotRoot()); } result = result && (hasSnapshotName() == other.hasSnapshotName()); if (hasSnapshotName()) { result = result && getSnapshotName() .equals(other.getSnapshotName()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSnapshotRoot()) { hash = (37 * hash) + SNAPSHOTROOT_FIELD_NUMBER; hash = (53 * hash) + getSnapshotRoot().hashCode(); } if (hasSnapshotName()) { hash = (37 * hash) + SNAPSHOTNAME_FIELD_NUMBER; hash = (53 * hash) + getSnapshotName().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public 
static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.CreateSnapshotRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateSnapshotRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateSnapshotRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); snapshotRoot_ = ""; bitField0_ = (bitField0_ & ~0x00000001); snapshotName_ = ""; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateSnapshotRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.snapshotRoot_ = snapshotRoot_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.snapshotName_ = snapshotName_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto)other); } else { 
super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto.getDefaultInstance()) return this; if (other.hasSnapshotRoot()) { bitField0_ |= 0x00000001; snapshotRoot_ = other.snapshotRoot_; onChanged(); } if (other.hasSnapshotName()) { bitField0_ |= 0x00000002; snapshotName_ = other.snapshotName_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSnapshotRoot()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string snapshotRoot = 1; private java.lang.Object snapshotRoot_ = ""; /** * required string snapshotRoot = 1; */ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string snapshotRoot = 1; */ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); snapshotRoot_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string snapshotRoot = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string snapshotRoot = 1; */ public Builder setSnapshotRoot( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotRoot_ = value; onChanged(); return this; } /** * required string snapshotRoot = 1; */ public Builder clearSnapshotRoot() { bitField0_ = (bitField0_ & ~0x00000001); snapshotRoot_ = getDefaultInstance().getSnapshotRoot(); onChanged(); return this; } /** * required string snapshotRoot = 1; */ public Builder setSnapshotRootBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotRoot_ = value; onChanged(); return this; } // optional string snapshotName = 2; private java.lang.Object snapshotName_ = ""; /** * optional string snapshotName = 2; */ public boolean hasSnapshotName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string snapshotName = 2; */ public java.lang.String getSnapshotName() { java.lang.Object ref = 
snapshotName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); snapshotName_ = s; return s; } else { return (java.lang.String) ref; } } /** * optional string snapshotName = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotNameBytes() { java.lang.Object ref = snapshotName_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * optional string snapshotName = 2; */ public Builder setSnapshotName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; snapshotName_ = value; onChanged(); return this; } /** * optional string snapshotName = 2; */ public Builder clearSnapshotName() { bitField0_ = (bitField0_ & ~0x00000002); snapshotName_ = getDefaultInstance().getSnapshotName(); onChanged(); return this; } /** * optional string snapshotName = 2; */ public Builder setSnapshotNameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; snapshotName_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CreateSnapshotRequestProto) } static { defaultInstance = new CreateSnapshotRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.CreateSnapshotRequestProto) } public interface CreateSnapshotResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string snapshotPath = 1; /** * required string snapshotPath = 1; */ boolean hasSnapshotPath(); /** * required string snapshotPath = 1; */ java.lang.String getSnapshotPath(); /** * required string snapshotPath = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotPathBytes(); } /** * Protobuf type {@code hadoop.hdfs.CreateSnapshotResponseProto} */ public static final class CreateSnapshotResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements CreateSnapshotResponseProtoOrBuilder { // Use CreateSnapshotResponseProto.newBuilder() to construct. 
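/*
 * Illustrative usage sketch, not part of the generated file: in the request
 * above, snapshotRoot is required, so build() throws (and isInitialized()
 * returns false) while it is unset; snapshotName is optional. The response
 * defined below carries the required snapshotPath of the created snapshot.
 * "/data" and "daily" are hypothetical values, and 'replyBytes' stands in for
 * a serialized response; toByteArray() and parseFrom(byte[]) are the methods
 * defined on these messages.
 *
 *   CreateSnapshotRequestProto req = CreateSnapshotRequestProto.newBuilder()
 *       .setSnapshotRoot("/data")  // required field
 *       .setSnapshotName("daily")  // optional field
 *       .build();
 *   byte[] wire = req.toByteArray();
 *   CreateSnapshotRequestProto parsed = CreateSnapshotRequestProto.parseFrom(wire);
 *
 *   // Reading the reply:
 *   CreateSnapshotResponseProto resp = CreateSnapshotResponseProto.parseFrom(replyBytes);
 *   String snapshotPath = resp.getSnapshotPath(); // e.g. "/data/.snapshot/daily"
 */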
private CreateSnapshotResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private CreateSnapshotResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final CreateSnapshotResponseProto defaultInstance; public static CreateSnapshotResponseProto getDefaultInstance() { return defaultInstance; } public CreateSnapshotResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CreateSnapshotResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; snapshotPath_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateSnapshotResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateSnapshotResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public CreateSnapshotResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new CreateSnapshotResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string snapshotPath = 1; public 
static final int SNAPSHOTPATH_FIELD_NUMBER = 1; private java.lang.Object snapshotPath_; /** * required string snapshotPath = 1; */ public boolean hasSnapshotPath() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string snapshotPath = 1; */ public java.lang.String getSnapshotPath() { java.lang.Object ref = snapshotPath_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotPath_ = s; } return s; } } /** * required string snapshotPath = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotPathBytes() { java.lang.Object ref = snapshotPath_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotPath_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { snapshotPath_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSnapshotPath()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSnapshotPathBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSnapshotPathBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto) obj; boolean result = true; result = result && (hasSnapshotPath() == other.hasSnapshotPath()); if (hasSnapshotPath()) { result = result && getSnapshotPath() .equals(other.getSnapshotPath()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSnapshotPath()) { hash = (37 * hash) + SNAPSHOTPATH_FIELD_NUMBER; hash = (53 * hash) + getSnapshotPath().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( 
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.CreateSnapshotResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateSnapshotResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateSnapshotResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); snapshotPath_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CreateSnapshotResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.snapshotPath_ = snapshotPath_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto.getDefaultInstance()) return this; if (other.hasSnapshotPath()) { bitField0_ |= 0x00000001; snapshotPath_ = other.snapshotPath_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSnapshotPath()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string snapshotPath = 1; private java.lang.Object snapshotPath_ = ""; /** * required string snapshotPath = 1; */ public boolean hasSnapshotPath() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string snapshotPath = 1; */ public java.lang.String getSnapshotPath() { java.lang.Object ref = snapshotPath_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); snapshotPath_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string snapshotPath = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotPathBytes() { java.lang.Object ref = snapshotPath_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotPath_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string snapshotPath = 1; */ public Builder setSnapshotPath( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotPath_ = value; onChanged(); return this; } /** * required string snapshotPath = 1; */ public Builder clearSnapshotPath() { bitField0_ = (bitField0_ & ~0x00000001); snapshotPath_ = getDefaultInstance().getSnapshotPath(); onChanged(); return this; } /** * required string snapshotPath = 1; */ public Builder setSnapshotPathBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotPath_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CreateSnapshotResponseProto) } static { defaultInstance = new CreateSnapshotResponseProto(true); defaultInstance.initFields(); } // 
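    // Illustrative sketch, not part of the generated source: parsing this
    // response from raw bytes (e.g. as received from a NameNode RPC) and
    // reading its single required field.
    private static java.lang.String exampleReadSnapshotPath(byte[] wire)
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      CreateSnapshotResponseProto resp = CreateSnapshotResponseProto.parseFrom(wire);
      return resp.getSnapshotPath(); // path of the created snapshot
    }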
@@protoc_insertion_point(class_scope:hadoop.hdfs.CreateSnapshotResponseProto) } public interface RenameSnapshotRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string snapshotRoot = 1; /** * required string snapshotRoot = 1; */ boolean hasSnapshotRoot(); /** * required string snapshotRoot = 1; */ java.lang.String getSnapshotRoot(); /** * required string snapshotRoot = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotRootBytes(); // required string snapshotOldName = 2; /** * required string snapshotOldName = 2; */ boolean hasSnapshotOldName(); /** * required string snapshotOldName = 2; */ java.lang.String getSnapshotOldName(); /** * required string snapshotOldName = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotOldNameBytes(); // required string snapshotNewName = 3; /** * required string snapshotNewName = 3; */ boolean hasSnapshotNewName(); /** * required string snapshotNewName = 3; */ java.lang.String getSnapshotNewName(); /** * required string snapshotNewName = 3; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotNewNameBytes(); } /** * Protobuf type {@code hadoop.hdfs.RenameSnapshotRequestProto} */ public static final class RenameSnapshotRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements RenameSnapshotRequestProtoOrBuilder { // Use RenameSnapshotRequestProto.newBuilder() to construct. private RenameSnapshotRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RenameSnapshotRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RenameSnapshotRequestProto defaultInstance; public static RenameSnapshotRequestProto getDefaultInstance() { return defaultInstance; } public RenameSnapshotRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RenameSnapshotRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; snapshotRoot_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; snapshotOldName_ = input.readBytes(); break; } case 26: { bitField0_ |= 0x00000004; snapshotNewName_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new 
io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenameSnapshotRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenameSnapshotRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public RenameSnapshotRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new RenameSnapshotRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string snapshotRoot = 1; public static final int SNAPSHOTROOT_FIELD_NUMBER = 1; private java.lang.Object snapshotRoot_; /** * required string snapshotRoot = 1; */ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string snapshotRoot = 1; */ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotRoot_ = s; } return s; } } /** * required string snapshotRoot = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required string snapshotOldName = 2; public static final int SNAPSHOTOLDNAME_FIELD_NUMBER = 2; private java.lang.Object snapshotOldName_; /** * required string snapshotOldName = 2; */ public boolean hasSnapshotOldName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string snapshotOldName = 2; */ public java.lang.String getSnapshotOldName() { java.lang.Object ref = snapshotOldName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { 
snapshotOldName_ = s; } return s; } } /** * required string snapshotOldName = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotOldNameBytes() { java.lang.Object ref = snapshotOldName_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotOldName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required string snapshotNewName = 3; public static final int SNAPSHOTNEWNAME_FIELD_NUMBER = 3; private java.lang.Object snapshotNewName_; /** * required string snapshotNewName = 3; */ public boolean hasSnapshotNewName() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string snapshotNewName = 3; */ public java.lang.String getSnapshotNewName() { java.lang.Object ref = snapshotNewName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotNewName_ = s; } return s; } } /** * required string snapshotNewName = 3; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotNewNameBytes() { java.lang.Object ref = snapshotNewName_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotNewName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { snapshotRoot_ = ""; snapshotOldName_ = ""; snapshotNewName_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSnapshotRoot()) { memoizedIsInitialized = 0; return false; } if (!hasSnapshotOldName()) { memoizedIsInitialized = 0; return false; } if (!hasSnapshotNewName()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSnapshotRootBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getSnapshotOldNameBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, getSnapshotNewNameBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSnapshotRootBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(2, getSnapshotOldNameBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(3, getSnapshotNewNameBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } 
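    // Illustrative sketch, not part of the generated source: how client code
    // might build and round-trip this request. The snapshot root and the old
    // and new snapshot names below are hypothetical example values.
    private static RenameSnapshotRequestProto exampleRenameSnapshotRoundTrip()
        throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException {
      RenameSnapshotRequestProto req = RenameSnapshotRequestProto.newBuilder()
          .setSnapshotRoot("/user/data")   // snapshottable directory (hypothetical)
          .setSnapshotOldName("s0")        // current snapshot name (hypothetical)
          .setSnapshotNewName("s1")        // new snapshot name (hypothetical)
          .build();                        // build() throws if a required field is unset
      byte[] wire = req.toByteArray();     // serialize to protobuf wire format
      return RenameSnapshotRequestProto.parseFrom(wire); // parse the bytes back
    }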
private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto) obj; boolean result = true; result = result && (hasSnapshotRoot() == other.hasSnapshotRoot()); if (hasSnapshotRoot()) { result = result && getSnapshotRoot() .equals(other.getSnapshotRoot()); } result = result && (hasSnapshotOldName() == other.hasSnapshotOldName()); if (hasSnapshotOldName()) { result = result && getSnapshotOldName() .equals(other.getSnapshotOldName()); } result = result && (hasSnapshotNewName() == other.hasSnapshotNewName()); if (hasSnapshotNewName()) { result = result && getSnapshotNewName() .equals(other.getSnapshotNewName()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSnapshotRoot()) { hash = (37 * hash) + SNAPSHOTROOT_FIELD_NUMBER; hash = (53 * hash) + getSnapshotRoot().hashCode(); } if (hasSnapshotOldName()) { hash = (37 * hash) + SNAPSHOTOLDNAME_FIELD_NUMBER; hash = (53 * hash) + getSnapshotOldName().hashCode(); } if (hasSnapshotNewName()) { hash = (37 * hash) + SNAPSHOTNEWNAME_FIELD_NUMBER; hash = (53 * hash) + getSnapshotNewName().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RenameSnapshotRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenameSnapshotRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenameSnapshotRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if 
(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); snapshotRoot_ = ""; bitField0_ = (bitField0_ & ~0x00000001); snapshotOldName_ = ""; bitField0_ = (bitField0_ & ~0x00000002); snapshotNewName_ = ""; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenameSnapshotRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.snapshotRoot_ = snapshotRoot_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.snapshotOldName_ = snapshotOldName_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.snapshotNewName_ = snapshotNewName_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto.getDefaultInstance()) return this; if (other.hasSnapshotRoot()) { bitField0_ |= 0x00000001; snapshotRoot_ = other.snapshotRoot_; onChanged(); } if (other.hasSnapshotOldName()) { bitField0_ |= 0x00000002; snapshotOldName_ = other.snapshotOldName_; onChanged(); } if (other.hasSnapshotNewName()) { bitField0_ |= 0x00000004; snapshotNewName_ = other.snapshotNewName_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSnapshotRoot()) { return false; } if (!hasSnapshotOldName()) { return false; } if (!hasSnapshotNewName()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string snapshotRoot = 1; private java.lang.Object snapshotRoot_ = ""; /** * required string snapshotRoot = 1; */ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string snapshotRoot = 1; */ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); snapshotRoot_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string snapshotRoot = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string snapshotRoot = 1; */ public Builder setSnapshotRoot( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotRoot_ = value; onChanged(); return this; } /** * required string snapshotRoot = 1; */ public Builder clearSnapshotRoot() { bitField0_ = (bitField0_ & ~0x00000001); snapshotRoot_ = getDefaultInstance().getSnapshotRoot(); onChanged(); return this; } /** * required string snapshotRoot = 1; */ public Builder setSnapshotRootBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotRoot_ = value; onChanged(); return this; } // required string snapshotOldName = 2; private java.lang.Object snapshotOldName_ = ""; /** * required string snapshotOldName = 2; */ public boolean hasSnapshotOldName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string snapshotOldName = 2; */ public java.lang.String getSnapshotOldName() { java.lang.Object ref = snapshotOldName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); snapshotOldName_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string snapshotOldName = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotOldNameBytes() { java.lang.Object ref = snapshotOldName_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotOldName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string snapshotOldName = 2; */ public Builder setSnapshotOldName( java.lang.String value) { if (value 
== null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; snapshotOldName_ = value; onChanged(); return this; } /** * required string snapshotOldName = 2; */ public Builder clearSnapshotOldName() { bitField0_ = (bitField0_ & ~0x00000002); snapshotOldName_ = getDefaultInstance().getSnapshotOldName(); onChanged(); return this; } /** * required string snapshotOldName = 2; */ public Builder setSnapshotOldNameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; snapshotOldName_ = value; onChanged(); return this; } // required string snapshotNewName = 3; private java.lang.Object snapshotNewName_ = ""; /** * required string snapshotNewName = 3; */ public boolean hasSnapshotNewName() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string snapshotNewName = 3; */ public java.lang.String getSnapshotNewName() { java.lang.Object ref = snapshotNewName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); snapshotNewName_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string snapshotNewName = 3; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotNewNameBytes() { java.lang.Object ref = snapshotNewName_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotNewName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string snapshotNewName = 3; */ public Builder setSnapshotNewName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; snapshotNewName_ = value; onChanged(); return this; } /** * required string snapshotNewName = 3; */ public Builder clearSnapshotNewName() { bitField0_ = (bitField0_ & ~0x00000004); snapshotNewName_ = getDefaultInstance().getSnapshotNewName(); onChanged(); return this; } /** * required string snapshotNewName = 3; */ public Builder setSnapshotNewNameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; snapshotNewName_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RenameSnapshotRequestProto) } static { defaultInstance = new RenameSnapshotRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RenameSnapshotRequestProto) } public interface RenameSnapshotResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.RenameSnapshotResponseProto} * *
   * <pre>
   * void response
   * </pre>
*/ public static final class RenameSnapshotResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements RenameSnapshotResponseProtoOrBuilder { // Use RenameSnapshotResponseProto.newBuilder() to construct. private RenameSnapshotResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RenameSnapshotResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RenameSnapshotResponseProto defaultInstance; public static RenameSnapshotResponseProto getDefaultInstance() { return defaultInstance; } public RenameSnapshotResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RenameSnapshotResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenameSnapshotResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenameSnapshotResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public RenameSnapshotResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new RenameSnapshotResponseProto(input, extensionRegistry); } }; @java.lang.Override public 
io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto parseFrom( java.io.InputStream input, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.RenameSnapshotResponseProto} * *
     * <pre>
     * void response
     * </pre>
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenameSnapshotResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenameSnapshotResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_RenameSnapshotResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto.getDefaultInstance()) return this; 
this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RenameSnapshotResponseProto) } static { defaultInstance = new RenameSnapshotResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.RenameSnapshotResponseProto) } public interface AllowSnapshotRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string snapshotRoot = 1; /** * required string snapshotRoot = 1; */ boolean hasSnapshotRoot(); /** * required string snapshotRoot = 1; */ java.lang.String getSnapshotRoot(); /** * required string snapshotRoot = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotRootBytes(); } /** * Protobuf type {@code hadoop.hdfs.AllowSnapshotRequestProto} */ public static final class AllowSnapshotRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements AllowSnapshotRequestProtoOrBuilder { // Use AllowSnapshotRequestProto.newBuilder() to construct. 
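    // Illustrative sketch, not part of the generated source: building a minimal
    // instance of this request. The snapshot root below is a hypothetical value.
    private static AllowSnapshotRequestProto exampleAllowSnapshotRequest() {
      AllowSnapshotRequestProto req = AllowSnapshotRequestProto.newBuilder()
          .setSnapshotRoot("/user/data") // directory to make snapshottable (hypothetical)
          .build();                      // succeeds: the one required field is set
      assert req.isInitialized();        // required snapshotRoot is present
      return req;
    }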
private AllowSnapshotRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private AllowSnapshotRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final AllowSnapshotRequestProto defaultInstance; public static AllowSnapshotRequestProto getDefaultInstance() { return defaultInstance; } public AllowSnapshotRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private AllowSnapshotRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; snapshotRoot_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AllowSnapshotRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AllowSnapshotRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public AllowSnapshotRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new AllowSnapshotRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string snapshotRoot = 1; public static final int 
SNAPSHOTROOT_FIELD_NUMBER = 1; private java.lang.Object snapshotRoot_; /** * required string snapshotRoot = 1; */ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string snapshotRoot = 1; */ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotRoot_ = s; } return s; } } /** * required string snapshotRoot = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { snapshotRoot_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSnapshotRoot()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSnapshotRootBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSnapshotRootBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto) obj; boolean result = true; result = result && (hasSnapshotRoot() == other.hasSnapshotRoot()); if (hasSnapshotRoot()) { result = result && getSnapshotRoot() .equals(other.getSnapshotRoot()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSnapshotRoot()) { hash = (37 * hash) + SNAPSHOTROOT_FIELD_NUMBER; hash = (53 * hash) + getSnapshotRoot().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder 
builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.AllowSnapshotRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AllowSnapshotRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AllowSnapshotRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); snapshotRoot_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AllowSnapshotRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.snapshotRoot_ = snapshotRoot_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto) { return 
mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto.getDefaultInstance()) return this; if (other.hasSnapshotRoot()) { bitField0_ |= 0x00000001; snapshotRoot_ = other.snapshotRoot_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSnapshotRoot()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string snapshotRoot = 1; private java.lang.Object snapshotRoot_ = ""; /** * required string snapshotRoot = 1; */ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string snapshotRoot = 1; */ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); snapshotRoot_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string snapshotRoot = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string snapshotRoot = 1; */ public Builder setSnapshotRoot( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotRoot_ = value; onChanged(); return this; } /** * required string snapshotRoot = 1; */ public Builder clearSnapshotRoot() { bitField0_ = (bitField0_ & ~0x00000001); snapshotRoot_ = getDefaultInstance().getSnapshotRoot(); onChanged(); return this; } /** * required string snapshotRoot = 1; */ public Builder setSnapshotRootBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotRoot_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AllowSnapshotRequestProto) } static { defaultInstance = new AllowSnapshotRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.AllowSnapshotRequestProto) } public interface AllowSnapshotResponseProtoOrBuilder extends 
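// ---------------------------------------------------------------------
// Illustrative sketch (not generated code): a round trip through the
// AllowSnapshotRequestProto message that closes above. Short class
// names assume the enclosing ClientNamenodeProtocolProtos class is
// imported; the snapshot-root path is a hypothetical example value.
//
//   AllowSnapshotRequestProto req =
//       AllowSnapshotRequestProto.newBuilder()
//           .setSnapshotRoot("/user/alice/data") // required field
//           .build();                            // throws if unset
//   byte[] wire = req.toByteArray();
//   AllowSnapshotRequestProto parsed =
//       AllowSnapshotRequestProto.parseFrom(wire);
//   assert parsed.getSnapshotRoot().equals(req.getSnapshotRoot());
// ---------------------------------------------------------------------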
io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.AllowSnapshotResponseProto} */ public static final class AllowSnapshotResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements AllowSnapshotResponseProtoOrBuilder { // Use AllowSnapshotResponseProto.newBuilder() to construct. private AllowSnapshotResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private AllowSnapshotResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final AllowSnapshotResponseProto defaultInstance; public static AllowSnapshotResponseProto getDefaultInstance() { return defaultInstance; } public AllowSnapshotResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private AllowSnapshotResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AllowSnapshotResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AllowSnapshotResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public AllowSnapshotResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return 
new AllowSnapshotResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.AllowSnapshotResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AllowSnapshotResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AllowSnapshotResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if 
(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_AllowSnapshotResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AllowSnapshotResponseProto) } static { defaultInstance = new AllowSnapshotResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.AllowSnapshotResponseProto) } public interface DisallowSnapshotRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string snapshotRoot = 1; /** * required string snapshotRoot = 1; */ boolean hasSnapshotRoot(); /** * required string snapshotRoot = 1; */ java.lang.String getSnapshotRoot(); 
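// ---------------------------------------------------------------------
// Editorial note (not generated code): AllowSnapshotResponseProto,
// which closes above, declares no fields -- the allowSnapshot RPC is
// effectively void, and the message exists only as an acknowledgement
// envelope. A minimal sketch of what that means on the wire:
//
//   AllowSnapshotResponseProto resp =
//       AllowSnapshotResponseProto.getDefaultInstance();
//   assert resp.getSerializedSize() == 0; // no fields, empty payload
// ---------------------------------------------------------------------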
/** * required string snapshotRoot = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotRootBytes(); } /** * Protobuf type {@code hadoop.hdfs.DisallowSnapshotRequestProto} */ public static final class DisallowSnapshotRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements DisallowSnapshotRequestProtoOrBuilder { // Use DisallowSnapshotRequestProto.newBuilder() to construct. private DisallowSnapshotRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DisallowSnapshotRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DisallowSnapshotRequestProto defaultInstance; public static DisallowSnapshotRequestProto getDefaultInstance() { return defaultInstance; } public DisallowSnapshotRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DisallowSnapshotRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; snapshotRoot_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DisallowSnapshotRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DisallowSnapshotRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public DisallowSnapshotRequestProto parsePartialFrom( 
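// ---------------------------------------------------------------------
// Editorial note (not generated code): the `case 10` in the parsing
// loop above is the wire tag of this message's only field. A protobuf
// tag is (field_number << 3) | wire_type, and strings use wire type 2
// (length-delimited), so field 1 encodes as (1 << 3) | 2 == 10.
// `case 0` marks end of stream, and the `default` arm routes every
// unrecognized tag into the unknown-field set. The same arithmetic
// explains the `case 18` ((2 << 3) | 2) seen later in the two-field
// DeleteSnapshotRequestProto.
// ---------------------------------------------------------------------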
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new DisallowSnapshotRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string snapshotRoot = 1; public static final int SNAPSHOTROOT_FIELD_NUMBER = 1; private java.lang.Object snapshotRoot_; /** * required string snapshotRoot = 1; */ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string snapshotRoot = 1; */ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotRoot_ = s; } return s; } } /** * required string snapshotRoot = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { snapshotRoot_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSnapshotRoot()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSnapshotRootBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSnapshotRootBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto) obj; boolean result = true; result = result && (hasSnapshotRoot() == other.hasSnapshotRoot()); if (hasSnapshotRoot()) { result = result && getSnapshotRoot() .equals(other.getSnapshotRoot()); } result = result && 
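// ---------------------------------------------------------------------
// Editorial note (not generated code): snapshotRoot_ above is typed
// java.lang.Object on purpose. The parser stores the raw ByteString;
// the first getSnapshotRoot() call decodes it and, when the bytes are
// valid UTF-8, caches the decoded String back into the field so later
// calls skip decoding. getSnapshotRootBytes() performs the
// mirror-image String-to-ByteString conversion and caches likewise.
// ---------------------------------------------------------------------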
getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSnapshotRoot()) { hash = (37 * hash) + SNAPSHOTROOT_FIELD_NUMBER; hash = (53 * hash) + getSnapshotRoot().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, 
extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.DisallowSnapshotRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DisallowSnapshotRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DisallowSnapshotRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); snapshotRoot_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DisallowSnapshotRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto result = new 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.snapshotRoot_ = snapshotRoot_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto.getDefaultInstance()) return this; if (other.hasSnapshotRoot()) { bitField0_ |= 0x00000001; snapshotRoot_ = other.snapshotRoot_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSnapshotRoot()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string snapshotRoot = 1; private java.lang.Object snapshotRoot_ = ""; /** * required string snapshotRoot = 1; */ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string snapshotRoot = 1; */ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); snapshotRoot_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string snapshotRoot = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string snapshotRoot = 1; */ public Builder setSnapshotRoot( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotRoot_ = value; onChanged(); return this; } /** * required string snapshotRoot = 1; */ public Builder clearSnapshotRoot() { bitField0_ = (bitField0_ & ~0x00000001); snapshotRoot_ = getDefaultInstance().getSnapshotRoot(); onChanged(); return this; } /** * required string 
snapshotRoot = 1; */ public Builder setSnapshotRootBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotRoot_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DisallowSnapshotRequestProto) } static { defaultInstance = new DisallowSnapshotRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DisallowSnapshotRequestProto) } public interface DisallowSnapshotResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.DisallowSnapshotResponseProto} */ public static final class DisallowSnapshotResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements DisallowSnapshotResponseProtoOrBuilder { // Use DisallowSnapshotResponseProto.newBuilder() to construct. private DisallowSnapshotResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DisallowSnapshotResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DisallowSnapshotResponseProto defaultInstance; public static DisallowSnapshotResponseProto getDefaultInstance() { return defaultInstance; } public DisallowSnapshotResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DisallowSnapshotResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DisallowSnapshotResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DisallowSnapshotResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( 
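// ---------------------------------------------------------------------
// Illustrative sketch (not generated code): the parseDelimitedFrom
// methods generated for DisallowSnapshotRequestProto above read a
// varint length prefix before the message body, which lets several
// messages share one stream. writeDelimitedTo is the counterpart
// inherited from MessageLite; paths and streams are hypothetical.
//
//   ByteArrayOutputStream out = new ByteArrayOutputStream();
//   DisallowSnapshotRequestProto.newBuilder()
//       .setSnapshotRoot("/dir/a").build().writeDelimitedTo(out);
//   DisallowSnapshotRequestProto.newBuilder()
//       .setSnapshotRoot("/dir/b").build().writeDelimitedTo(out);
//   InputStream in = new ByteArrayInputStream(out.toByteArray());
//   DisallowSnapshotRequestProto a =
//       DisallowSnapshotRequestProto.parseDelimitedFrom(in); // "/dir/a"
//   DisallowSnapshotRequestProto b =
//       DisallowSnapshotRequestProto.parseDelimitedFrom(in); // "/dir/b"
// ---------------------------------------------------------------------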
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public DisallowSnapshotResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new DisallowSnapshotResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto parseFrom(byte[] data) throws 
io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.DisallowSnapshotResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DisallowSnapshotResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DisallowSnapshotResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DisallowSnapshotResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch 
(io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DisallowSnapshotResponseProto) } static { defaultInstance = new DisallowSnapshotResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DisallowSnapshotResponseProto) } public interface DeleteSnapshotRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string snapshotRoot = 1; /** * required string snapshotRoot = 1; */ boolean hasSnapshotRoot(); /** * required string snapshotRoot = 1; */ java.lang.String getSnapshotRoot(); /** * required string snapshotRoot = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotRootBytes(); // required string snapshotName = 2; /** * required string snapshotName = 2; */ boolean hasSnapshotName(); /** * required string snapshotName = 2; */ java.lang.String getSnapshotName(); /** * required string snapshotName = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotNameBytes(); } /** * Protobuf type {@code hadoop.hdfs.DeleteSnapshotRequestProto} */ public static final class DeleteSnapshotRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements DeleteSnapshotRequestProtoOrBuilder { // Use DeleteSnapshotRequestProto.newBuilder() to construct. private DeleteSnapshotRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DeleteSnapshotRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DeleteSnapshotRequestProto defaultInstance; public static DeleteSnapshotRequestProto getDefaultInstance() { return defaultInstance; } public DeleteSnapshotRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DeleteSnapshotRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; snapshotRoot_ = input.readBytes(); break; } case 18: { bitField0_ |= 0x00000002; snapshotName_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException 
e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DeleteSnapshotRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DeleteSnapshotRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public DeleteSnapshotRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new DeleteSnapshotRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string snapshotRoot = 1; public static final int SNAPSHOTROOT_FIELD_NUMBER = 1; private java.lang.Object snapshotRoot_; /** * required string snapshotRoot = 1; */ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string snapshotRoot = 1; */ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotRoot_ = s; } return s; } } /** * required string snapshotRoot = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required string snapshotName = 2; public static final int SNAPSHOTNAME_FIELD_NUMBER = 2; private java.lang.Object snapshotName_; /** * required string snapshotName = 2; */ public boolean hasSnapshotName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string snapshotName = 2; */ public java.lang.String getSnapshotName() { java.lang.Object ref = snapshotName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { snapshotName_ 
= s; } return s; } } /** * required string snapshotName = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotNameBytes() { java.lang.Object ref = snapshotName_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { snapshotRoot_ = ""; snapshotName_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSnapshotRoot()) { memoizedIsInitialized = 0; return false; } if (!hasSnapshotName()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSnapshotRootBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getSnapshotNameBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSnapshotRootBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(2, getSnapshotNameBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto) obj; boolean result = true; result = result && (hasSnapshotRoot() == other.hasSnapshotRoot()); if (hasSnapshotRoot()) { result = result && getSnapshotRoot() .equals(other.getSnapshotRoot()); } result = result && (hasSnapshotName() == other.hasSnapshotName()); if (hasSnapshotName()) { result = result && getSnapshotName() .equals(other.getSnapshotName()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSnapshotRoot()) { hash = (37 * hash) + SNAPSHOTROOT_FIELD_NUMBER; hash = (53 * hash) + getSnapshotRoot().hashCode(); } if (hasSnapshotName()) { hash = (37 * hash) + SNAPSHOTNAME_FIELD_NUMBER; hash = (53 * hash) + getSnapshotName().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) 
{ Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.DeleteSnapshotRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DeleteSnapshotRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DeleteSnapshotRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); snapshotRoot_ = ""; bitField0_ = (bitField0_ & ~0x00000001); snapshotName_ = ""; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DeleteSnapshotRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.snapshotRoot_ = snapshotRoot_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.snapshotName_ = snapshotName_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { 
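        // Generic merge entry point: if 'other' is the same generated message
        // type, delegate to the typed mergeFrom(DeleteSnapshotRequestProto)
        // overload below; otherwise fall back to the reflective,
        // field-by-field merge in the protobuf Builder superclass.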
if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto.getDefaultInstance()) return this; if (other.hasSnapshotRoot()) { bitField0_ |= 0x00000001; snapshotRoot_ = other.snapshotRoot_; onChanged(); } if (other.hasSnapshotName()) { bitField0_ |= 0x00000002; snapshotName_ = other.snapshotName_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSnapshotRoot()) { return false; } if (!hasSnapshotName()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string snapshotRoot = 1; private java.lang.Object snapshotRoot_ = ""; /** * required string snapshotRoot = 1; */ public boolean hasSnapshotRoot() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string snapshotRoot = 1; */ public java.lang.String getSnapshotRoot() { java.lang.Object ref = snapshotRoot_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); snapshotRoot_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string snapshotRoot = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotRootBytes() { java.lang.Object ref = snapshotRoot_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotRoot_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string snapshotRoot = 1; */ public Builder setSnapshotRoot( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotRoot_ = value; onChanged(); return this; } /** * required string snapshotRoot = 1; */ public Builder clearSnapshotRoot() { bitField0_ = (bitField0_ & ~0x00000001); snapshotRoot_ = getDefaultInstance().getSnapshotRoot(); onChanged(); return this; } /** * required string snapshotRoot = 1; */ public Builder setSnapshotRootBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; snapshotRoot_ = value; onChanged(); return this; } // required string snapshotName = 2; private 
java.lang.Object snapshotName_ = ""; /** * required string snapshotName = 2; */ public boolean hasSnapshotName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string snapshotName = 2; */ public java.lang.String getSnapshotName() { java.lang.Object ref = snapshotName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); snapshotName_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string snapshotName = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSnapshotNameBytes() { java.lang.Object ref = snapshotName_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); snapshotName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string snapshotName = 2; */ public Builder setSnapshotName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; snapshotName_ = value; onChanged(); return this; } /** * required string snapshotName = 2; */ public Builder clearSnapshotName() { bitField0_ = (bitField0_ & ~0x00000002); snapshotName_ = getDefaultInstance().getSnapshotName(); onChanged(); return this; } /** * required string snapshotName = 2; */ public Builder setSnapshotNameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; snapshotName_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DeleteSnapshotRequestProto) } static { defaultInstance = new DeleteSnapshotRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DeleteSnapshotRequestProto) } public interface DeleteSnapshotResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.DeleteSnapshotResponseProto} * *
   * <pre>
   * void response
   * </pre>
*/ public static final class DeleteSnapshotResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements DeleteSnapshotResponseProtoOrBuilder { // Use DeleteSnapshotResponseProto.newBuilder() to construct. private DeleteSnapshotResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private DeleteSnapshotResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final DeleteSnapshotResponseProto defaultInstance; public static DeleteSnapshotResponseProto getDefaultInstance() { return defaultInstance; } public DeleteSnapshotResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private DeleteSnapshotResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DeleteSnapshotResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DeleteSnapshotResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public DeleteSnapshotResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new DeleteSnapshotResponseProto(input, extensionRegistry); } }; @java.lang.Override public 
io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto parseFrom( java.io.InputStream input, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.DeleteSnapshotResponseProto} * *
     * <pre>
     * void response
     * </pre>
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DeleteSnapshotResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DeleteSnapshotResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_DeleteSnapshotResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto.getDefaultInstance()) return this; 
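            // DeleteSnapshotResponseProto declares no fields (a void response),
            // so merging another instance only carries over its unknown fields.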
this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DeleteSnapshotResponseProto) } static { defaultInstance = new DeleteSnapshotResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.DeleteSnapshotResponseProto) } public interface CheckAccessRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string path = 1; /** * required string path = 1; */ boolean hasPath(); /** * required string path = 1; */ java.lang.String getPath(); /** * required string path = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes(); // required .hadoop.hdfs.AclEntryProto.FsActionProto mode = 2; /** * required .hadoop.hdfs.AclEntryProto.FsActionProto mode = 2; */ boolean hasMode(); /** * required .hadoop.hdfs.AclEntryProto.FsActionProto mode = 2; */ org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto getMode(); } /** * Protobuf type {@code hadoop.hdfs.CheckAccessRequestProto} */ public static final class CheckAccessRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements CheckAccessRequestProtoOrBuilder { // Use CheckAccessRequestProto.newBuilder() to construct. 
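    // Usage sketch (illustrative only; the sample path and the READ action are
    // assumed values, everything else is the generated API of this class):
    //
    //   CheckAccessRequestProto req = CheckAccessRequestProto.newBuilder()
    //       .setPath("/user/alice/data")                          // required field 1
    //       .setMode(AclProtos.AclEntryProto.FsActionProto.READ)  // required field 2
    //       .build();   // build() throws if a required field is left unset
    //   byte[] wire = req.toByteArray();
    //   CheckAccessRequestProto parsed = CheckAccessRequestProto.parseFrom(wire);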
private CheckAccessRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private CheckAccessRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final CheckAccessRequestProto defaultInstance; public static CheckAccessRequestProto getDefaultInstance() { return defaultInstance; } public CheckAccessRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CheckAccessRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; path_ = input.readBytes(); break; } case 16: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto value = org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(2, rawValue); } else { bitField0_ |= 0x00000002; mode_ = value; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CheckAccessRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CheckAccessRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public CheckAccessRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new CheckAccessRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string path = 1; public static final int PATH_FIELD_NUMBER = 1; private java.lang.Object path_; /** * required string path = 1; */ public boolean hasPath() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string path = 1; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { path_ = s; } return s; } } /** * required string path = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required .hadoop.hdfs.AclEntryProto.FsActionProto mode = 2; public static final int MODE_FIELD_NUMBER = 2; private org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto mode_; /** * required .hadoop.hdfs.AclEntryProto.FsActionProto mode = 2; */ public boolean hasMode() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.AclEntryProto.FsActionProto mode = 2; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto getMode() { return mode_; } private void initFields() { path_ = ""; mode_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto.NONE; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasPath()) { memoizedIsInitialized = 0; return false; } if (!hasMode()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getPathBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeEnum(2, mode_.getNumber()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getPathBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeEnumSize(2, mode_.getNumber()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final 
java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto) obj; boolean result = true; result = result && (hasPath() == other.hasPath()); if (hasPath()) { result = result && getPath() .equals(other.getPath()); } result = result && (hasMode() == other.hasMode()); if (hasMode()) { result = result && (getMode() == other.getMode()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasPath()) { hash = (37 * hash) + PATH_FIELD_NUMBER; hash = (53 * hash) + getPath().hashCode(); } if (hasMode()) { hash = (37 * hash) + MODE_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getMode()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto parseDelimitedFrom( java.io.InputStream input, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.CheckAccessRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CheckAccessRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CheckAccessRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); path_ = ""; bitField0_ = (bitField0_ & ~0x00000001); mode_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto.NONE; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CheckAccessRequestProto_descriptor; } public 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.path_ = path_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.mode_ = mode_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto.getDefaultInstance()) return this; if (other.hasPath()) { bitField0_ |= 0x00000001; path_ = other.path_; onChanged(); } if (other.hasMode()) { setMode(other.getMode()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasPath()) { return false; } if (!hasMode()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string path = 1; private java.lang.Object path_ = ""; /** * required string path = 1; */ public boolean hasPath() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string path = 1; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); path_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string path = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes() { 
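          // The field is stored as a plain Object that may hold either a String
          // or a ByteString: if it is still a String here, encode it to UTF-8
          // once and cache the resulting ByteString so later calls avoid
          // re-encoding.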
java.lang.Object ref = path_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string path = 1; */ public Builder setPath( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; path_ = value; onChanged(); return this; } /** * required string path = 1; */ public Builder clearPath() { bitField0_ = (bitField0_ & ~0x00000001); path_ = getDefaultInstance().getPath(); onChanged(); return this; } /** * required string path = 1; */ public Builder setPathBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; path_ = value; onChanged(); return this; } // required .hadoop.hdfs.AclEntryProto.FsActionProto mode = 2; private org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto mode_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto.NONE; /** * required .hadoop.hdfs.AclEntryProto.FsActionProto mode = 2; */ public boolean hasMode() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .hadoop.hdfs.AclEntryProto.FsActionProto mode = 2; */ public org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto getMode() { return mode_; } /** * required .hadoop.hdfs.AclEntryProto.FsActionProto mode = 2; */ public Builder setMode(org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; mode_ = value; onChanged(); return this; } /** * required .hadoop.hdfs.AclEntryProto.FsActionProto mode = 2; */ public Builder clearMode() { bitField0_ = (bitField0_ & ~0x00000002); mode_ = org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto.NONE; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CheckAccessRequestProto) } static { defaultInstance = new CheckAccessRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.CheckAccessRequestProto) } public interface CheckAccessResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.CheckAccessResponseProto} * *
   * <pre>
   * void response
   * </pre>
*/ public static final class CheckAccessResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements CheckAccessResponseProtoOrBuilder { // Use CheckAccessResponseProto.newBuilder() to construct. private CheckAccessResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private CheckAccessResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final CheckAccessResponseProto defaultInstance; public static CheckAccessResponseProto getDefaultInstance() { return defaultInstance; } public CheckAccessResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CheckAccessResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CheckAccessResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CheckAccessResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public CheckAccessResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new CheckAccessResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { 
return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return 
PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.CheckAccessResponseProto} * *
     * <pre>
     * void response
     * </pre>
*/ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CheckAccessResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CheckAccessResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_CheckAccessResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; 
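    /*
     * Usage sketch (illustrative, not part of the generated API surface):
     * CheckAccessResponseProto carries no fields, so a round trip only exercises
     * the unknown-field machinery. Assuming `bytes` is a hypothetical byte[]
     * holding a serialized response:
     *
     *   CheckAccessResponseProto resp =
     *       CheckAccessResponseProto.parseFrom(bytes);  // throws InvalidProtocolBufferException on bad input
     *   byte[] again = resp.toByteArray();              // re-serializes; zero length when no unknown fields
     */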
} public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CheckAccessResponseProto) } static { defaultInstance = new CheckAccessResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.CheckAccessResponseProto) } public interface GetCurrentEditLogTxidRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.GetCurrentEditLogTxidRequestProto} */ public static final class GetCurrentEditLogTxidRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetCurrentEditLogTxidRequestProtoOrBuilder { // Use GetCurrentEditLogTxidRequestProto.newBuilder() to construct. private GetCurrentEditLogTxidRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetCurrentEditLogTxidRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetCurrentEditLogTxidRequestProto defaultInstance; public static GetCurrentEditLogTxidRequestProto getDefaultInstance() { return defaultInstance; } public GetCurrentEditLogTxidRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetCurrentEditLogTxidRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final 
io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetCurrentEditLogTxidRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetCurrentEditLogTxidRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetCurrentEditLogTxidRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetCurrentEditLogTxidRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetCurrentEditLogTxidRequestProto} */ public static final class Builder extends 
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetCurrentEditLogTxidRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetCurrentEditLogTxidRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetCurrentEditLogTxidRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto other) { if (other == 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetCurrentEditLogTxidRequestProto) } static { defaultInstance = new GetCurrentEditLogTxidRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetCurrentEditLogTxidRequestProto) } public interface GetCurrentEditLogTxidResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required int64 txid = 1; /** * required int64 txid = 1; */ boolean hasTxid(); /** * required int64 txid = 1; */ long getTxid(); } /** * Protobuf type {@code hadoop.hdfs.GetCurrentEditLogTxidResponseProto} */ public static final class GetCurrentEditLogTxidResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetCurrentEditLogTxidResponseProtoOrBuilder { // Use GetCurrentEditLogTxidResponseProto.newBuilder() to construct. 
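  /*
   * Usage sketch, assuming a caller-supplied transaction id `txid` (hypothetical
   * local variable): the response wraps a single required int64, so the builder
   * must set it before build() will pass the isInitialized() check.
   *
   *   GetCurrentEditLogTxidResponseProto resp =
   *       GetCurrentEditLogTxidResponseProto.newBuilder()
   *           .setTxid(txid)   // required int64 txid = 1
   *           .build();        // throws UninitializedMessageException if txid is unset
   *   long id = resp.getTxid();
   */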
private GetCurrentEditLogTxidResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetCurrentEditLogTxidResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetCurrentEditLogTxidResponseProto defaultInstance; public static GetCurrentEditLogTxidResponseProto getDefaultInstance() { return defaultInstance; } public GetCurrentEditLogTxidResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetCurrentEditLogTxidResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; txid_ = input.readInt64(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetCurrentEditLogTxidResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetCurrentEditLogTxidResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetCurrentEditLogTxidResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetCurrentEditLogTxidResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; 
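    /*
     * Field presence in this proto2-style class is tracked with a bit mask: bit 0
     * of bitField0_ records whether txid was explicitly set, which is what
     * hasTxid() below tests. A sketch of the equivalent caller-side check,
     * assuming a message instance `resp` and a hypothetical process(long) callback:
     *
     *   if (resp.hasTxid()) {        // distinguishes "set to 0" from "absent"
     *     process(resp.getTxid());
     *   }
     */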
} private int bitField0_; // required int64 txid = 1; public static final int TXID_FIELD_NUMBER = 1; private long txid_; /** * required int64 txid = 1; */ public boolean hasTxid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 txid = 1; */ public long getTxid() { return txid_; } private void initFields() { txid_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasTxid()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeInt64(1, txid_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeInt64Size(1, txid_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto) obj; boolean result = true; result = result && (hasTxid() == other.hasTxid()); if (hasTxid()) { result = result && (getTxid() == other.getTxid()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasTxid()) { hash = (37 * hash) + TXID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getTxid()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException 
{ return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetCurrentEditLogTxidResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetCurrentEditLogTxidResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetCurrentEditLogTxidResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); txid_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetCurrentEditLogTxidResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.txid_ = txid_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto.getDefaultInstance()) return this; if (other.hasTxid()) { setTxid(other.getTxid()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasTxid()) { return false; } return true; } public Builder mergeFrom( 
io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required int64 txid = 1; private long txid_ ; /** * required int64 txid = 1; */ public boolean hasTxid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 txid = 1; */ public long getTxid() { return txid_; } /** * required int64 txid = 1; */ public Builder setTxid(long value) { bitField0_ |= 0x00000001; txid_ = value; onChanged(); return this; } /** * required int64 txid = 1; */ public Builder clearTxid() { bitField0_ = (bitField0_ & ~0x00000001); txid_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetCurrentEditLogTxidResponseProto) } static { defaultInstance = new GetCurrentEditLogTxidResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetCurrentEditLogTxidResponseProto) } public interface GetEditsFromTxidRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required int64 txid = 1; /** * required int64 txid = 1; */ boolean hasTxid(); /** * required int64 txid = 1; */ long getTxid(); } /** * Protobuf type {@code hadoop.hdfs.GetEditsFromTxidRequestProto} */ public static final class GetEditsFromTxidRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetEditsFromTxidRequestProtoOrBuilder { // Use GetEditsFromTxidRequestProto.newBuilder() to construct. 
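  /*
   * Usage sketch (illustrative only): the request names the first transaction id
   * the caller wants edits from; pairing it with GetCurrentEditLogTxidResponseProto
   * above gives a simple inotify-style polling loop. `lastReadTxid` is a
   * hypothetical local.
   *
   *   GetEditsFromTxidRequestProto req =
   *       GetEditsFromTxidRequestProto.newBuilder()
   *           .setTxid(lastReadTxid + 1)   // required int64 txid = 1
   *           .build();
   *   byte[] wire = req.toByteArray();     // ready for the RPC layer
   */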
private GetEditsFromTxidRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetEditsFromTxidRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetEditsFromTxidRequestProto defaultInstance; public static GetEditsFromTxidRequestProto getDefaultInstance() { return defaultInstance; } public GetEditsFromTxidRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetEditsFromTxidRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; txid_ = input.readInt64(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetEditsFromTxidRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetEditsFromTxidRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetEditsFromTxidRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetEditsFromTxidRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required int64 txid = 1; public static 
final int TXID_FIELD_NUMBER = 1; private long txid_; /** * required int64 txid = 1; */ public boolean hasTxid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 txid = 1; */ public long getTxid() { return txid_; } private void initFields() { txid_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasTxid()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeInt64(1, txid_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeInt64Size(1, txid_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto) obj; boolean result = true; result = result && (hasTxid() == other.hasTxid()); if (hasTxid()) { result = result && (getTxid() == other.getTxid()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasTxid()) { hash = (37 * hash) + TXID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getTxid()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetEditsFromTxidRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetEditsFromTxidRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetEditsFromTxidRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); txid_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetEditsFromTxidRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.txid_ = txid_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto.getDefaultInstance()) return this; if (other.hasTxid()) { setTxid(other.getTxid()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasTxid()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto parsedMessage = null; try 
{ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required int64 txid = 1; private long txid_ ; /** * required int64 txid = 1; */ public boolean hasTxid() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 txid = 1; */ public long getTxid() { return txid_; } /** * required int64 txid = 1; */ public Builder setTxid(long value) { bitField0_ |= 0x00000001; txid_ = value; onChanged(); return this; } /** * required int64 txid = 1; */ public Builder clearTxid() { bitField0_ = (bitField0_ & ~0x00000001); txid_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetEditsFromTxidRequestProto) } static { defaultInstance = new GetEditsFromTxidRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetEditsFromTxidRequestProto) } public interface GetEditsFromTxidResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required .hadoop.hdfs.EventsListProto eventsList = 1; /** * required .hadoop.hdfs.EventsListProto eventsList = 1; */ boolean hasEventsList(); /** * required .hadoop.hdfs.EventsListProto eventsList = 1; */ org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto getEventsList(); /** * required .hadoop.hdfs.EventsListProto eventsList = 1; */ org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProtoOrBuilder getEventsListOrBuilder(); } /** * Protobuf type {@code hadoop.hdfs.GetEditsFromTxidResponseProto} */ public static final class GetEditsFromTxidResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements GetEditsFromTxidResponseProtoOrBuilder { // Use GetEditsFromTxidResponseProto.newBuilder() to construct. 
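  /*
   * Usage sketch, assuming a parsed response `resp`: the single required field is
   * itself a message (hadoop.hdfs.EventsListProto), so isInitialized() recurses
   * into it, and getEventsList() returns the nested default instance when unset.
   *
   *   if (resp.hasEventsList()) {
   *     org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto events =
   *         resp.getEventsList();
   *     // iterate the contained events here; the exact accessors are defined in InotifyProtos
   *   }
   */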
private GetEditsFromTxidResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetEditsFromTxidResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetEditsFromTxidResponseProto defaultInstance; public static GetEditsFromTxidResponseProto getDefaultInstance() { return defaultInstance; } public GetEditsFromTxidResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetEditsFromTxidResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = eventsList_.toBuilder(); } eventsList_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(eventsList_); eventsList_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetEditsFromTxidResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetEditsFromTxidResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public GetEditsFromTxidResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, 
io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new GetEditsFromTxidResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required .hadoop.hdfs.EventsListProto eventsList = 1; public static final int EVENTSLIST_FIELD_NUMBER = 1; private org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto eventsList_; /** * required .hadoop.hdfs.EventsListProto eventsList = 1; */ public boolean hasEventsList() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.EventsListProto eventsList = 1; */ public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto getEventsList() { return eventsList_; } /** * required .hadoop.hdfs.EventsListProto eventsList = 1; */ public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProtoOrBuilder getEventsListOrBuilder() { return eventsList_; } private void initFields() { eventsList_ = org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasEventsList()) { memoizedIsInitialized = 0; return false; } if (!getEventsList().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, eventsList_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, eventsList_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto) obj; boolean result = true; result = result && (hasEventsList() == other.hasEventsList()); if (hasEventsList()) { result = result && getEventsList() .equals(other.getEventsList()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasEventsList()) { hash = (37 * hash) + EVENTSLIST_FIELD_NUMBER; hash = (53 * hash) + getEventsList().hashCode(); } hash = (29 * 
hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override 
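  // Editorial usage sketch (not generated code): deserializing a response with
  // one of the static parseFrom overloads above and reading its required
  // field. The byte[] variable `bytes` is assumed to come from the RPC
  // transport.
  //
  //   GetEditsFromTxidResponseProto resp =
  //       GetEditsFromTxidResponseProto.parseFrom(bytes);  // throws InvalidProtocolBufferException on bad input
  //   if (resp.hasEventsList()) {
  //     org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto
  //         events = resp.getEventsList();
  //   }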
protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.GetEditsFromTxidResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetEditsFromTxidResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetEditsFromTxidResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getEventsListFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (eventsListBuilder_ == null) { eventsList_ = org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto.getDefaultInstance(); } else { eventsListBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_GetEditsFromTxidResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if 
(eventsListBuilder_ == null) { result.eventsList_ = eventsList_; } else { result.eventsList_ = eventsListBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto.getDefaultInstance()) return this; if (other.hasEventsList()) { mergeEventsList(other.getEventsList()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasEventsList()) { return false; } if (!getEventsList().isInitialized()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .hadoop.hdfs.EventsListProto eventsList = 1; private org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto eventsList_ = org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto.getDefaultInstance(); private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto.Builder, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProtoOrBuilder> eventsListBuilder_; /** * required .hadoop.hdfs.EventsListProto eventsList = 1; */ public boolean hasEventsList() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required .hadoop.hdfs.EventsListProto eventsList = 1; */ public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto getEventsList() { if (eventsListBuilder_ == null) { return eventsList_; } else { return eventsListBuilder_.getMessage(); } } /** * required .hadoop.hdfs.EventsListProto eventsList = 1; */ public Builder setEventsList(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto value) { if (eventsListBuilder_ == null) { if (value == null) { throw new NullPointerException(); } eventsList_ = value; onChanged(); } else { eventsListBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.EventsListProto eventsList = 1; */ public Builder setEventsList( org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto.Builder builderForValue) { if (eventsListBuilder_ == null) { eventsList_ = builderForValue.build(); onChanged(); } else { 
eventsListBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.EventsListProto eventsList = 1; */ public Builder mergeEventsList(org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto value) { if (eventsListBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && eventsList_ != org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto.getDefaultInstance()) { eventsList_ = org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto.newBuilder(eventsList_).mergeFrom(value).buildPartial(); } else { eventsList_ = value; } onChanged(); } else { eventsListBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * required .hadoop.hdfs.EventsListProto eventsList = 1; */ public Builder clearEventsList() { if (eventsListBuilder_ == null) { eventsList_ = org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto.getDefaultInstance(); onChanged(); } else { eventsListBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * required .hadoop.hdfs.EventsListProto eventsList = 1; */ public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto.Builder getEventsListBuilder() { bitField0_ |= 0x00000001; onChanged(); return getEventsListFieldBuilder().getBuilder(); } /** * required .hadoop.hdfs.EventsListProto eventsList = 1; */ public org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProtoOrBuilder getEventsListOrBuilder() { if (eventsListBuilder_ != null) { return eventsListBuilder_.getMessageOrBuilder(); } else { return eventsList_; } } /** * required .hadoop.hdfs.EventsListProto eventsList = 1; */ private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto.Builder, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProtoOrBuilder> getEventsListFieldBuilder() { if (eventsListBuilder_ == null) { eventsListBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProto.Builder, org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventsListProtoOrBuilder>( eventsList_, getParentForChildren(), isClean()); eventsList_ = null; } return eventsListBuilder_; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetEditsFromTxidResponseProto) } static { defaultInstance = new GetEditsFromTxidResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetEditsFromTxidResponseProto) } public interface ListOpenFilesRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required int64 id = 1; /** * required int64 id = 1; */ boolean hasId(); /** * required int64 id = 1; */ long getId(); // repeated .hadoop.hdfs.OpenFilesTypeProto types = 2; /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 2; */ java.util.List getTypesList(); /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 2; */ int getTypesCount(); /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 2; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesTypeProto getTypes(int index); // optional string path = 3; /** * optional string path = 3; */ boolean hasPath(); /** * optional string path = 3; */ java.lang.String getPath(); /** * 
optional string path = 3; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes(); } /** * Protobuf type {@code hadoop.hdfs.ListOpenFilesRequestProto} */ public static final class ListOpenFilesRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements ListOpenFilesRequestProtoOrBuilder { // Use ListOpenFilesRequestProto.newBuilder() to construct. private ListOpenFilesRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ListOpenFilesRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ListOpenFilesRequestProto defaultInstance; public static ListOpenFilesRequestProto getDefaultInstance() { return defaultInstance; } public ListOpenFilesRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ListOpenFilesRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; id_ = input.readInt64(); break; } case 16: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesTypeProto value = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(2, rawValue); } else { if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { types_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000002; } types_.add(value); } break; } case 18: { int length = input.readRawVarint32(); int oldLimit = input.pushLimit(length); while(input.getBytesUntilLimit() > 0) { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesTypeProto value = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(2, rawValue); } else { if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { types_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000002; } types_.add(value); } } input.popLimit(oldLimit); break; } case 26: { bitField0_ |= 0x00000002; path_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if 
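  // Editorial note on the parsing loop above: the repeated enum field `types`
  // is accepted in both wire encodings -- tag 16 (one varint per element) and
  // tag 18 (a packed, length-delimited run of varints) -- and an unrecognized
  // enum number is preserved in the unknown-field set via
  // unknownFields.mergeVarintField(2, rawValue) instead of being dropped.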
(((mutable_bitField0_ & 0x00000002) == 0x00000002)) { types_ = java.util.Collections.unmodifiableList(types_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListOpenFilesRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListOpenFilesRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public ListOpenFilesRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new ListOpenFilesRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required int64 id = 1; public static final int ID_FIELD_NUMBER = 1; private long id_; /** * required int64 id = 1; */ public boolean hasId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 id = 1; */ public long getId() { return id_; } // repeated .hadoop.hdfs.OpenFilesTypeProto types = 2; public static final int TYPES_FIELD_NUMBER = 2; private java.util.List types_; /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 2; */ public java.util.List getTypesList() { return types_; } /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 2; */ public int getTypesCount() { return types_.size(); } /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 2; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesTypeProto getTypes(int index) { return types_.get(index); } // optional string path = 3; public static final int PATH_FIELD_NUMBER = 3; private java.lang.Object path_; /** * optional string path = 3; */ public boolean hasPath() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string path = 3; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { path_ = s; } return s; } } /** * optional string path = 3; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) 
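  // Editorial note: string fields in these messages are held as
  // java.lang.Object so the ByteString read off the wire can be decoded
  // lazily; getPath() above caches the decoded String back into the field
  // only when the bytes are valid UTF-8, so later calls skip re-decoding.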
ref; } } private void initFields() { id_ = 0L; types_ = java.util.Collections.emptyList(); path_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasId()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeInt64(1, id_); } for (int i = 0; i < types_.size(); i++) { output.writeEnum(2, types_.get(i).getNumber()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(3, getPathBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeInt64Size(1, id_); } { int dataSize = 0; for (int i = 0; i < types_.size(); i++) { dataSize += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeEnumSizeNoTag(types_.get(i).getNumber()); } size += dataSize; size += 1 * types_.size(); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(3, getPathBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto) obj; boolean result = true; result = result && (hasId() == other.hasId()); if (hasId()) { result = result && (getId() == other.getId()); } result = result && getTypesList() .equals(other.getTypesList()); result = result && (hasPath() == other.hasPath()); if (hasPath()) { result = result && getPath() .equals(other.getPath()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasId()) { hash = (37 * hash) + ID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getId()); } if (getTypesCount() > 0) { hash = (37 * hash) + TYPES_FIELD_NUMBER; hash = (53 * hash) + hashEnumList(getTypesList()); } if (hasPath()) { hash = (37 * hash) + PATH_FIELD_NUMBER; hash = (53 * hash) + getPath().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws 
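  // Editorial note: getSerializedSize() and hashCode() above memoize their
  // results (memoizedSerializedSize, memoizedHashCode); this is safe because
  // a message instance is immutable once built, so neither value can change.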
io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ListOpenFilesRequestProto} */ public static final class Builder extends 
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListOpenFilesRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListOpenFilesRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); id_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); types_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000002); path_ = ""; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListOpenFilesRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.id_ = id_; if (((bitField0_ & 0x00000002) == 0x00000002)) { types_ = java.util.Collections.unmodifiableList(types_); bitField0_ = (bitField0_ & ~0x00000002); } result.types_ = types_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000002; } result.path_ = path_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder 
mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto.getDefaultInstance()) return this; if (other.hasId()) { setId(other.getId()); } if (!other.types_.isEmpty()) { if (types_.isEmpty()) { types_ = other.types_; bitField0_ = (bitField0_ & ~0x00000002); } else { ensureTypesIsMutable(); types_.addAll(other.types_); } onChanged(); } if (other.hasPath()) { bitField0_ |= 0x00000004; path_ = other.path_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasId()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required int64 id = 1; private long id_ ; /** * required int64 id = 1; */ public boolean hasId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 id = 1; */ public long getId() { return id_; } /** * required int64 id = 1; */ public Builder setId(long value) { bitField0_ |= 0x00000001; id_ = value; onChanged(); return this; } /** * required int64 id = 1; */ public Builder clearId() { bitField0_ = (bitField0_ & ~0x00000001); id_ = 0L; onChanged(); return this; } // repeated .hadoop.hdfs.OpenFilesTypeProto types = 2; private java.util.List types_ = java.util.Collections.emptyList(); private void ensureTypesIsMutable() { if (!((bitField0_ & 0x00000002) == 0x00000002)) { types_ = new java.util.ArrayList(types_); bitField0_ |= 0x00000002; } } /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 2; */ public java.util.List getTypesList() { return java.util.Collections.unmodifiableList(types_); } /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 2; */ public int getTypesCount() { return types_.size(); } /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 2; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesTypeProto getTypes(int index) { return types_.get(index); } /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 2; */ public Builder setTypes( int index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesTypeProto value) { if (value == null) { throw new NullPointerException(); } ensureTypesIsMutable(); types_.set(index, value); onChanged(); return this; } /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 2; */ public Builder 
addTypes(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesTypeProto value) { if (value == null) { throw new NullPointerException(); } ensureTypesIsMutable(); types_.add(value); onChanged(); return this; } /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 2; */ public Builder addAllTypes( java.lang.Iterable values) { ensureTypesIsMutable(); super.addAll(values, types_); onChanged(); return this; } /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 2; */ public Builder clearTypes() { types_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000002); onChanged(); return this; } // optional string path = 3; private java.lang.Object path_ = ""; /** * optional string path = 3; */ public boolean hasPath() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional string path = 3; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); path_ = s; return s; } else { return (java.lang.String) ref; } } /** * optional string path = 3; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * optional string path = 3; */ public Builder setPath( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; path_ = value; onChanged(); return this; } /** * optional string path = 3; */ public Builder clearPath() { bitField0_ = (bitField0_ & ~0x00000004); path_ = getDefaultInstance().getPath(); onChanged(); return this; } /** * optional string path = 3; */ public Builder setPathBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; path_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ListOpenFilesRequestProto) } static { defaultInstance = new ListOpenFilesRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ListOpenFilesRequestProto) } public interface OpenFilesBatchResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required int64 id = 1; /** * required int64 id = 1; */ boolean hasId(); /** * required int64 id = 1; */ long getId(); // required string path = 2; /** * required string path = 2; */ boolean hasPath(); /** * required string path = 2; */ java.lang.String getPath(); /** * required string path = 2; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes(); // required string clientName = 3; /** * required string clientName = 3; */ boolean hasClientName(); /** * required string clientName = 3; */ java.lang.String getClientName(); /** * required string clientName = 3; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes(); // required string clientMachine = 4; /** * required string clientMachine = 4; */ boolean hasClientMachine(); /** * required string clientMachine = 4; */ java.lang.String getClientMachine(); /** * required string clientMachine = 4; */ 
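  // Editorial usage sketch (not generated code) for the
  // ListOpenFilesRequestProto completed above. The enum constant
  // ALL_OPEN_FILES is assumed to exist on OpenFilesTypeProto for illustration
  // only; the id and path values are placeholders.
  //
  //   ListOpenFilesRequestProto req = ListOpenFilesRequestProto.newBuilder()
  //       .setId(0L)                                    // required cursor id, field 1
  //       .addTypes(OpenFilesTypeProto.ALL_OPEN_FILES)  // repeated enum, field 2
  //       .setPath("/")                                 // optional filter, field 3
  //       .build();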
io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientMachineBytes(); } /** * Protobuf type {@code hadoop.hdfs.OpenFilesBatchResponseProto} */ public static final class OpenFilesBatchResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements OpenFilesBatchResponseProtoOrBuilder { // Use OpenFilesBatchResponseProto.newBuilder() to construct. private OpenFilesBatchResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private OpenFilesBatchResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final OpenFilesBatchResponseProto defaultInstance; public static OpenFilesBatchResponseProto getDefaultInstance() { return defaultInstance; } public OpenFilesBatchResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private OpenFilesBatchResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; id_ = input.readInt64(); break; } case 18: { bitField0_ |= 0x00000002; path_ = input.readBytes(); break; } case 26: { bitField0_ |= 0x00000004; clientName_ = input.readBytes(); break; } case 34: { bitField0_ |= 0x00000008; clientMachine_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_OpenFilesBatchResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_OpenFilesBatchResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new 
io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public OpenFilesBatchResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new OpenFilesBatchResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required int64 id = 1; public static final int ID_FIELD_NUMBER = 1; private long id_; /** * required int64 id = 1; */ public boolean hasId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 id = 1; */ public long getId() { return id_; } // required string path = 2; public static final int PATH_FIELD_NUMBER = 2; private java.lang.Object path_; /** * required string path = 2; */ public boolean hasPath() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string path = 2; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { path_ = s; } return s; } } /** * required string path = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required string clientName = 3; public static final int CLIENTNAME_FIELD_NUMBER = 3; private java.lang.Object clientName_; /** * required string clientName = 3; */ public boolean hasClientName() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string clientName = 3; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { clientName_ = s; } return s; } } /** * required string clientName = 3; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } // required string clientMachine = 4; public static final int CLIENTMACHINE_FIELD_NUMBER = 4; private java.lang.Object clientMachine_; /** * required string clientMachine = 4; */ public boolean hasClientMachine() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required string clientMachine = 4; */ public java.lang.String getClientMachine() { java.lang.Object ref = clientMachine_; if (ref instanceof java.lang.String) { 
return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { clientMachine_ = s; } return s; } } /** * required string clientMachine = 4; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientMachineBytes() { java.lang.Object ref = clientMachine_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientMachine_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { id_ = 0L; path_ = ""; clientName_ = ""; clientMachine_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasId()) { memoizedIsInitialized = 0; return false; } if (!hasPath()) { memoizedIsInitialized = 0; return false; } if (!hasClientName()) { memoizedIsInitialized = 0; return false; } if (!hasClientMachine()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeInt64(1, id_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getPathBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, getClientNameBytes()); } if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeBytes(4, getClientMachineBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeInt64Size(1, id_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(2, getPathBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(3, getClientNameBytes()); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(4, getClientMachineBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto) obj; boolean result = true; result = result && (hasId() == other.hasId()); if (hasId()) { result = result && (getId() == 
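  // Editorial usage sketch (not generated code): reading an
  // OpenFilesBatchResponseProto entry. All four fields are required, so
  // parseFrom is expected to reject input missing any of them; `bytes` is a
  // placeholder for transport input.
  //
  //   OpenFilesBatchResponseProto entry =
  //       OpenFilesBatchResponseProto.parseFrom(bytes);
  //   String summary = entry.getId() + " " + entry.getPath() + " "
  //       + entry.getClientName() + "@" + entry.getClientMachine();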
other.getId()); } result = result && (hasPath() == other.hasPath()); if (hasPath()) { result = result && getPath() .equals(other.getPath()); } result = result && (hasClientName() == other.hasClientName()); if (hasClientName()) { result = result && getClientName() .equals(other.getClientName()); } result = result && (hasClientMachine() == other.hasClientMachine()); if (hasClientMachine()) { result = result && getClientMachine() .equals(other.getClientMachine()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasId()) { hash = (37 * hash) + ID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getId()); } if (hasPath()) { hash = (37 * hash) + PATH_FIELD_NUMBER; hash = (53 * hash) + getPath().hashCode(); } if (hasClientName()) { hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER; hash = (53 * hash) + getClientName().hashCode(); } if (hasClientMachine()) { hash = (37 * hash) + CLIENTMACHINE_FIELD_NUMBER; hash = (53 * hash) + getClientMachine().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto parseDelimitedFrom( 
java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.OpenFilesBatchResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_OpenFilesBatchResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_OpenFilesBatchResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); id_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); path_ = ""; bitField0_ = (bitField0_ & ~0x00000002); clientName_ = ""; bitField0_ = (bitField0_ & ~0x00000004); clientMachine_ = ""; bitField0_ = (bitField0_ & ~0x00000008); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_OpenFilesBatchResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.id_ = id_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.path_ = path_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.clientName_ = clientName_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.clientMachine_ = clientMachine_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto.getDefaultInstance()) return this; if (other.hasId()) { setId(other.getId()); } if (other.hasPath()) { bitField0_ |= 0x00000002; path_ = other.path_; onChanged(); } if (other.hasClientName()) { bitField0_ |= 0x00000004; clientName_ = other.clientName_; onChanged(); } if (other.hasClientMachine()) { bitField0_ |= 0x00000008; clientMachine_ = other.clientMachine_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasId()) { return false; } if (!hasPath()) { return false; } if (!hasClientName()) { return false; } if (!hasClientMachine()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto) e.getUnfinishedMessage(); throw e; } 
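/* Editorial note: all four fields of this message are declared `required`,
 * so isInitialized() above only reports true once id, path, clientName and
 * clientMachine have each been set; build() on a builder missing any of
 * them throws an UninitializedMessageException, while buildPartial()
 * returns the incomplete message without checking. A sketch:
 *
 *   OpenFilesBatchResponseProto.Builder b =
 *       OpenFilesBatchResponseProto.newBuilder().setId(7L);
 *   b.isInitialized();   // false: path, clientName, clientMachine unset
 *   b.buildPartial();    // allowed; b.build() would throw here
 */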
finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required int64 id = 1; private long id_ ; /** * required int64 id = 1; */ public boolean hasId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 id = 1; */ public long getId() { return id_; } /** * required int64 id = 1; */ public Builder setId(long value) { bitField0_ |= 0x00000001; id_ = value; onChanged(); return this; } /** * required int64 id = 1; */ public Builder clearId() { bitField0_ = (bitField0_ & ~0x00000001); id_ = 0L; onChanged(); return this; } // required string path = 2; private java.lang.Object path_ = ""; /** * required string path = 2; */ public boolean hasPath() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string path = 2; */ public java.lang.String getPath() { java.lang.Object ref = path_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); path_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string path = 2; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); path_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string path = 2; */ public Builder setPath( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; path_ = value; onChanged(); return this; } /** * required string path = 2; */ public Builder clearPath() { bitField0_ = (bitField0_ & ~0x00000002); path_ = getDefaultInstance().getPath(); onChanged(); return this; } /** * required string path = 2; */ public Builder setPathBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; path_ = value; onChanged(); return this; } // required string clientName = 3; private java.lang.Object clientName_ = ""; /** * required string clientName = 3; */ public boolean hasClientName() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string clientName = 3; */ public java.lang.String getClientName() { java.lang.Object ref = clientName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); clientName_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string clientName = 3; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientNameBytes() { java.lang.Object ref = clientName_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientName_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string clientName = 3; */ public Builder setClientName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; clientName_ = value; onChanged(); return this; } /** * required string clientName = 3; */ public Builder clearClientName() { bitField0_ = (bitField0_ & 
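/* Editorial note: the string accessors above keep each field as a
 * java.lang.Object that holds either a decoded java.lang.String or the raw
 * ByteString read off the wire. getPath()/getClientName() decode UTF-8
 * lazily and cache the resulting String; the get*Bytes() variants convert
 * in the opposite direction. Fields that are only forwarded, never read as
 * text, therefore skip UTF-8 decoding entirely.
 */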
~0x00000004); clientName_ = getDefaultInstance().getClientName(); onChanged(); return this; } /** * required string clientName = 3; */ public Builder setClientNameBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; clientName_ = value; onChanged(); return this; } // required string clientMachine = 4; private java.lang.Object clientMachine_ = ""; /** * required string clientMachine = 4; */ public boolean hasClientMachine() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * required string clientMachine = 4; */ public java.lang.String getClientMachine() { java.lang.Object ref = clientMachine_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); clientMachine_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string clientMachine = 4; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getClientMachineBytes() { java.lang.Object ref = clientMachine_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); clientMachine_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string clientMachine = 4; */ public Builder setClientMachine( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; clientMachine_ = value; onChanged(); return this; } /** * required string clientMachine = 4; */ public Builder clearClientMachine() { bitField0_ = (bitField0_ & ~0x00000008); clientMachine_ = getDefaultInstance().getClientMachine(); onChanged(); return this; } /** * required string clientMachine = 4; */ public Builder setClientMachineBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; clientMachine_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpenFilesBatchResponseProto) } static { defaultInstance = new OpenFilesBatchResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.OpenFilesBatchResponseProto) } public interface ListOpenFilesResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ java.util.List getEntriesList(); /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto getEntries(int index); /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ int getEntriesCount(); /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ java.util.List getEntriesOrBuilderList(); /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProtoOrBuilder getEntriesOrBuilder( int index); // required bool hasMore = 2; /** * required bool hasMore = 2; */ boolean hasHasMore(); /** * required bool hasMore = 2; */ boolean getHasMore(); // repeated .hadoop.hdfs.OpenFilesTypeProto types = 3; /** * repeated 
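// Editorial note: OpenFilesBatchResponseProto above models one open-file
// entry (inode id, path, and the lease holder's client name and machine, as
// the field names suggest). The ListOpenFilesResponseProto message that
// follows batches such entries and pairs them with a required hasMore flag
// so callers can page through the NameNode's open-file list.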
.hadoop.hdfs.OpenFilesTypeProto types = 3; */ java.util.List getTypesList(); /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 3; */ int getTypesCount(); /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 3; */ org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesTypeProto getTypes(int index); } /** * Protobuf type {@code hadoop.hdfs.ListOpenFilesResponseProto} */ public static final class ListOpenFilesResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements ListOpenFilesResponseProtoOrBuilder { // Use ListOpenFilesResponseProto.newBuilder() to construct. private ListOpenFilesResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ListOpenFilesResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ListOpenFilesResponseProto defaultInstance; public static ListOpenFilesResponseProto getDefaultInstance() { return defaultInstance; } public ListOpenFilesResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ListOpenFilesResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { entries_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } entries_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto.PARSER, extensionRegistry)); break; } case 16: { bitField0_ |= 0x00000001; hasMore_ = input.readBool(); break; } case 24: { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesTypeProto value = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(3, rawValue); } else { if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { types_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000004; } types_.add(value); } break; } case 26: { int length = input.readRawVarint32(); int oldLimit = input.pushLimit(length); while(input.getBytesUntilLimit() > 0) { int rawValue = input.readEnum(); org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesTypeProto value = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesTypeProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(3, rawValue); } else { if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) 
{ types_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000004; } types_.add(value); } } input.popLimit(oldLimit); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { entries_ = java.util.Collections.unmodifiableList(entries_); } if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { types_ = java.util.Collections.unmodifiableList(types_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListOpenFilesResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListOpenFilesResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public ListOpenFilesResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new ListOpenFilesResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; public static final int ENTRIES_FIELD_NUMBER = 1; private java.util.List entries_; /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public java.util.List getEntriesList() { return entries_; } /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public java.util.List getEntriesOrBuilderList() { return entries_; } /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public int getEntriesCount() { return entries_.size(); } /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto getEntries(int index) { return entries_.get(index); } /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProtoOrBuilder getEntriesOrBuilder( int index) { return entries_.get(index); } // required bool hasMore = 2; public static final int HASMORE_FIELD_NUMBER = 2; private boolean hasMore_; /** * required bool hasMore = 2; */ public boolean hasHasMore() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bool hasMore = 2; */ public boolean getHasMore() { return hasMore_; } // 
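/* Editorial note: the parse loop above accepts the repeated enum field
 * `types` (field number 3) in both wire forms a proto2 parser must handle:
 * tag 24 (wire type 0) carries one unpacked varint per element, while tag
 * 26 (wire type 2) carries a packed, length-delimited run that is drained
 * via pushLimit()/getBytesUntilLimit()/popLimit(). Enum numbers this build
 * does not recognize are preserved in unknownFields via
 * mergeVarintField(3, rawValue) rather than silently dropped.
 */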
repeated .hadoop.hdfs.OpenFilesTypeProto types = 3; public static final int TYPES_FIELD_NUMBER = 3; private java.util.List types_; /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 3; */ public java.util.List getTypesList() { return types_; } /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 3; */ public int getTypesCount() { return types_.size(); } /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 3; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesTypeProto getTypes(int index) { return types_.get(index); } private void initFields() { entries_ = java.util.Collections.emptyList(); hasMore_ = false; types_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasHasMore()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getEntriesCount(); i++) { if (!getEntries(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); for (int i = 0; i < entries_.size(); i++) { output.writeMessage(1, entries_.get(i)); } if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBool(2, hasMore_); } for (int i = 0; i < types_.size(); i++) { output.writeEnum(3, types_.get(i).getNumber()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; for (int i = 0; i < entries_.size(); i++) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeMessageSize(1, entries_.get(i)); } if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBoolSize(2, hasMore_); } { int dataSize = 0; for (int i = 0; i < types_.size(); i++) { dataSize += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeEnumSizeNoTag(types_.get(i).getNumber()); } size += dataSize; size += 1 * types_.size(); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto) obj; boolean result = true; result = result && getEntriesList() .equals(other.getEntriesList()); result = result && (hasHasMore() == other.hasHasMore()); if (hasHasMore()) { result = result && (getHasMore() == other.getHasMore()); } result = result && getTypesList() .equals(other.getTypesList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + 
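/* Editorial note: getSerializedSize() above is memoized and prices `types`
 * as one tag byte plus the varint payload per element (size += dataSize;
 * size += 1 * types_.size()), matching writeTo(), which emits the enums
 * unpacked with writeEnum(3, ...) even though the parser also accepts the
 * packed encoding.
 */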
getDescriptorForType().hashCode(); if (getEntriesCount() > 0) { hash = (37 * hash) + ENTRIES_FIELD_NUMBER; hash = (53 * hash) + getEntriesList().hashCode(); } if (hasHasMore()) { hash = (37 * hash) + HASMORE_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getHasMore()); } if (getTypesCount() > 0) { hash = (37 * hash) + TYPES_FIELD_NUMBER; hash = (53 * hash) + hashEnumList(getTypesList()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public 
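/* Editorial note: the parseDelimitedFrom() overloads above read a varint
 * length prefix followed by exactly that many bytes, so several messages
 * can share one InputStream; writeDelimitedTo() on the message side is the
 * matching producer. A sketch with hypothetical streams `out` and `in`:
 *
 *   response.writeDelimitedTo(out);
 *   ListOpenFilesResponseProto next =
 *       ListOpenFilesResponseProto.parseDelimitedFrom(in);
 */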
static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.ListOpenFilesResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListOpenFilesResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListOpenFilesResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getEntriesFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (entriesBuilder_ == null) { entries_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { entriesBuilder_.clear(); } hasMore_ = false; bitField0_ = (bitField0_ & ~0x00000002); types_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_ListOpenFilesResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (entriesBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { entries_ = java.util.Collections.unmodifiableList(entries_); bitField0_ = (bitField0_ & ~0x00000001); } result.entries_ = entries_; } else { result.entries_ = entriesBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000001; } result.hasMore_ = hasMore_; if (((bitField0_ & 0x00000004) == 0x00000004)) { types_ = java.util.Collections.unmodifiableList(types_); bitField0_ = (bitField0_ & ~0x00000004); } result.types_ = types_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto.getDefaultInstance()) return this; if (entriesBuilder_ == null) { if (!other.entries_.isEmpty()) { if (entries_.isEmpty()) { entries_ = other.entries_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureEntriesIsMutable(); entries_.addAll(other.entries_); } onChanged(); } } else { if (!other.entries_.isEmpty()) { if (entriesBuilder_.isEmpty()) { entriesBuilder_.dispose(); entriesBuilder_ = null; entries_ = other.entries_; bitField0_ = (bitField0_ & ~0x00000001); entriesBuilder_ = io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
getEntriesFieldBuilder() : null; } else { entriesBuilder_.addAllMessages(other.entries_); } } } if (other.hasHasMore()) { setHasMore(other.getHasMore()); } if (!other.types_.isEmpty()) { if (types_.isEmpty()) { types_ = other.types_; bitField0_ = (bitField0_ & ~0x00000004); } else { ensureTypesIsMutable(); types_.addAll(other.types_); } onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasHasMore()) { return false; } for (int i = 0; i < getEntriesCount(); i++) { if (!getEntries(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; private java.util.List entries_ = java.util.Collections.emptyList(); private void ensureEntriesIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { entries_ = new java.util.ArrayList(entries_); bitField0_ |= 0x00000001; } } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProtoOrBuilder> entriesBuilder_; /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public java.util.List getEntriesList() { if (entriesBuilder_ == null) { return java.util.Collections.unmodifiableList(entries_); } else { return entriesBuilder_.getMessageList(); } } /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public int getEntriesCount() { if (entriesBuilder_ == null) { return entries_.size(); } else { return entriesBuilder_.getCount(); } } /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto getEntries(int index) { if (entriesBuilder_ == null) { return entries_.get(index); } else { return entriesBuilder_.getMessage(index); } } /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public Builder setEntries( int index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto value) { if (entriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureEntriesIsMutable(); entries_.set(index, value); onChanged(); } else { entriesBuilder_.setMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public Builder setEntries( int index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto.Builder builderForValue) { if (entriesBuilder_ == null) { 
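/* Editorial note: the repeated `entries` field operates in one of two
 * modes. Until a sub-builder is requested, elements live in a plain
 * ArrayList guarded by ensureEntriesIsMutable(); once
 * getEntriesFieldBuilder() is invoked (or alwaysUseFieldBuilders is set),
 * a RepeatedFieldBuilder takes ownership and every accessor dispatches on
 * entriesBuilder_ == null. This keeps the cheap list path allocation-free
 * until nested in-place editing of entries is actually needed.
 */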
ensureEntriesIsMutable(); entries_.set(index, builderForValue.build()); onChanged(); } else { entriesBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public Builder addEntries(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto value) { if (entriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureEntriesIsMutable(); entries_.add(value); onChanged(); } else { entriesBuilder_.addMessage(value); } return this; } /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public Builder addEntries( int index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto value) { if (entriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureEntriesIsMutable(); entries_.add(index, value); onChanged(); } else { entriesBuilder_.addMessage(index, value); } return this; } /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public Builder addEntries( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto.Builder builderForValue) { if (entriesBuilder_ == null) { ensureEntriesIsMutable(); entries_.add(builderForValue.build()); onChanged(); } else { entriesBuilder_.addMessage(builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public Builder addEntries( int index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto.Builder builderForValue) { if (entriesBuilder_ == null) { ensureEntriesIsMutable(); entries_.add(index, builderForValue.build()); onChanged(); } else { entriesBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public Builder addAllEntries( java.lang.Iterable values) { if (entriesBuilder_ == null) { ensureEntriesIsMutable(); super.addAll(values, entries_); onChanged(); } else { entriesBuilder_.addAllMessages(values); } return this; } /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public Builder clearEntries() { if (entriesBuilder_ == null) { entries_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { entriesBuilder_.clear(); } return this; } /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public Builder removeEntries(int index) { if (entriesBuilder_ == null) { ensureEntriesIsMutable(); entries_.remove(index); onChanged(); } else { entriesBuilder_.remove(index); } return this; } /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto.Builder getEntriesBuilder( int index) { return getEntriesFieldBuilder().getBuilder(index); } /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProtoOrBuilder getEntriesOrBuilder( int index) { if (entriesBuilder_ == null) { return entries_.get(index); } else { return entriesBuilder_.getMessageOrBuilder(index); } } /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public java.util.List getEntriesOrBuilderList() { if (entriesBuilder_ != null) { return entriesBuilder_.getMessageOrBuilderList(); } else { return 
java.util.Collections.unmodifiableList(entries_); } } /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto.Builder addEntriesBuilder() { return getEntriesFieldBuilder().addBuilder( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto.Builder addEntriesBuilder( int index) { return getEntriesFieldBuilder().addBuilder( index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto.getDefaultInstance()); } /** * repeated .hadoop.hdfs.OpenFilesBatchResponseProto entries = 1; */ public java.util.List getEntriesBuilderList() { return getEntriesFieldBuilder().getBuilderList(); } private io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProtoOrBuilder> getEntriesFieldBuilder() { if (entriesBuilder_ == null) { entriesBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProtoOrBuilder>( entries_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); entries_ = null; } return entriesBuilder_; } // required bool hasMore = 2; private boolean hasMore_ ; /** * required bool hasMore = 2; */ public boolean hasHasMore() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required bool hasMore = 2; */ public boolean getHasMore() { return hasMore_; } /** * required bool hasMore = 2; */ public Builder setHasMore(boolean value) { bitField0_ |= 0x00000002; hasMore_ = value; onChanged(); return this; } /** * required bool hasMore = 2; */ public Builder clearHasMore() { bitField0_ = (bitField0_ & ~0x00000002); hasMore_ = false; onChanged(); return this; } // repeated .hadoop.hdfs.OpenFilesTypeProto types = 3; private java.util.List types_ = java.util.Collections.emptyList(); private void ensureTypesIsMutable() { if (!((bitField0_ & 0x00000004) == 0x00000004)) { types_ = new java.util.ArrayList(types_); bitField0_ |= 0x00000004; } } /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 3; */ public java.util.List getTypesList() { return java.util.Collections.unmodifiableList(types_); } /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 3; */ public int getTypesCount() { return types_.size(); } /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 3; */ public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesTypeProto getTypes(int index) { return types_.get(index); } /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 3; */ public Builder setTypes( int index, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesTypeProto value) { if (value == null) { throw new NullPointerException(); } ensureTypesIsMutable(); 
types_.set(index, value); onChanged(); return this; } /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 3; */ public Builder addTypes(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesTypeProto value) { if (value == null) { throw new NullPointerException(); } ensureTypesIsMutable(); types_.add(value); onChanged(); return this; } /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 3; */ public Builder addAllTypes( java.lang.Iterable values) { ensureTypesIsMutable(); super.addAll(values, types_); onChanged(); return this; } /** * repeated .hadoop.hdfs.OpenFilesTypeProto types = 3; */ public Builder clearTypes() { types_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000004); onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ListOpenFilesResponseProto) } static { defaultInstance = new ListOpenFilesResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.ListOpenFilesResponseProto) } public interface SatisfyStoragePolicyRequestProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { // required string src = 1; /** * required string src = 1; */ boolean hasSrc(); /** * required string src = 1; */ java.lang.String getSrc(); /** * required string src = 1; */ io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes(); } /** * Protobuf type {@code hadoop.hdfs.SatisfyStoragePolicyRequestProto} */ public static final class SatisfyStoragePolicyRequestProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements SatisfyStoragePolicyRequestProtoOrBuilder { // Use SatisfyStoragePolicyRequestProto.newBuilder() to construct. private SatisfyStoragePolicyRequestProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SatisfyStoragePolicyRequestProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SatisfyStoragePolicyRequestProto defaultInstance; public static SatisfyStoragePolicyRequestProto getDefaultInstance() { return defaultInstance; } public SatisfyStoragePolicyRequestProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SatisfyStoragePolicyRequestProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; src_ = input.readBytes(); break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw 
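/* Editorial note: a minimal consumer-side sketch of the paged open-files
 * listing; `response` stands for a hypothetical ListOpenFilesResponseProto
 * obtained from the ClientNamenodeProtocol RPC layer:
 *
 *   for (OpenFilesBatchResponseProto e : response.getEntriesList()) {
 *       System.out.println(e.getId() + "\t" + e.getPath()
 *           + "\t" + e.getClientName() + "@" + e.getClientMachine());
 *   }
 *   if (response.getHasMore()) {
 *       // re-issue the request, continuing after the last returned entry
 *   }
 */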
e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SatisfyStoragePolicyRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SatisfyStoragePolicyRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public SatisfyStoragePolicyRequestProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new SatisfyStoragePolicyRequestProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; // required string src = 1; public static final int SRC_FIELD_NUMBER = 1; private java.lang.Object src_; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString bs = (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { src_ = s; } return s; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof java.lang.String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } private void initFields() { src_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasSrc()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getSrcBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; 
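/* Editorial note: unlike the Builder-side getter further below, the
 * message-side getSrc() above only caches the decoded String when
 * bs.isValidUtf8() holds, so a payload that is not valid UTF-8 is
 * re-decoded on every call rather than being cached in a lossy form.
 */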
if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream .computeBytesSize(1, getSrcBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto) obj; boolean result = true; result = result && (hasSrc() == other.hasSrc()); if (hasSrc()) { result = result && getSrc() .equals(other.getSrc()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSrc()) { hash = (37 * hash) + SRC_FIELD_NUMBER; hash = (53 * hash) + getSrc().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SatisfyStoragePolicyRequestProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SatisfyStoragePolicyRequestProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SatisfyStoragePolicyRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); src_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } 
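/* Editorial note: SatisfyStoragePolicyRequestProto carries a single
 * required `src` string naming the HDFS path whose storage policy should
 * be satisfied. A build sketch with an assumed path:
 *
 *   SatisfyStoragePolicyRequestProto req =
 *       SatisfyStoragePolicyRequestProto.newBuilder()
 *           .setSrc("/data/warm/part-00000")   // hypothetical path
 *           .build();
 */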
public Builder clone() { return create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SatisfyStoragePolicyRequestProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.src_ = src_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto.getDefaultInstance()) return this; if (other.hasSrc()) { bitField0_ |= 0x00000001; src_ = other.src_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasSrc()) { return false; } return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required string src = 1; private java.lang.Object src_ = ""; /** * required string src = 1; */ public boolean hasSrc() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string src = 1; */ public java.lang.String getSrc() { java.lang.Object ref = src_; if (!(ref instanceof java.lang.String)) { java.lang.String s = 
((io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref) .toStringUtf8(); src_ = s; return s; } else { return (java.lang.String) ref; } } /** * required string src = 1; */ public io.prestosql.hadoop.$internal.com.google.protobuf.ByteString getSrcBytes() { java.lang.Object ref = src_; if (ref instanceof String) { io.prestosql.hadoop.$internal.com.google.protobuf.ByteString b = io.prestosql.hadoop.$internal.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); src_ = b; return b; } else { return (io.prestosql.hadoop.$internal.com.google.protobuf.ByteString) ref; } } /** * required string src = 1; */ public Builder setSrc( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } /** * required string src = 1; */ public Builder clearSrc() { bitField0_ = (bitField0_ & ~0x00000001); src_ = getDefaultInstance().getSrc(); onChanged(); return this; } /** * required string src = 1; */ public Builder setSrcBytes( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; src_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SatisfyStoragePolicyRequestProto) } static { defaultInstance = new SatisfyStoragePolicyRequestProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SatisfyStoragePolicyRequestProto) } public interface SatisfyStoragePolicyResponseProtoOrBuilder extends io.prestosql.hadoop.$internal.com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code hadoop.hdfs.SatisfyStoragePolicyResponseProto} */ public static final class SatisfyStoragePolicyResponseProto extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage implements SatisfyStoragePolicyResponseProtoOrBuilder { // Use SatisfyStoragePolicyResponseProto.newBuilder() to construct. 
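  // -------------------------------------------------------------------
  // Usage sketch (illustrative, not generated code): the request/response
  // pair round-trips through the standard protobuf surface. `src` is a
  // required field, so build() throws if it was never set; this response
  // message carries no fields and acts as a bare acknowledgement. The
  // path below is a made-up example value.
  //
  //   SatisfyStoragePolicyRequestProto req =
  //       SatisfyStoragePolicyRequestProto.newBuilder()
  //           .setSrc("/user/alice/archive")   // hypothetical HDFS path
  //           .build();
  //   byte[] wire = req.toByteArray();
  //   SatisfyStoragePolicyRequestProto parsed =
  //       SatisfyStoragePolicyRequestProto.parseFrom(wire);
  //   assert parsed.hasSrc() && parsed.getSrc().equals(req.getSrc());
  //
  // For length-prefixed streaming, writeDelimitedTo(java.io.OutputStream)
  // pairs with the parseDelimitedFrom(java.io.InputStream) overloads above.
  // -------------------------------------------------------------------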
private SatisfyStoragePolicyResponseProto(io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private SatisfyStoragePolicyResponseProto(boolean noInit) { this.unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final SatisfyStoragePolicyResponseProto defaultInstance; public static SatisfyStoragePolicyResponseProto getDefaultInstance() { return defaultInstance; } public SatisfyStoragePolicyResponseProto getDefaultInstanceForType() { return defaultInstance; } private final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private SatisfyStoragePolicyResponseProto( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { initFields(); io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.Builder unknownFields = io.prestosql.hadoop.$internal.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SatisfyStoragePolicyResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SatisfyStoragePolicyResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto.Builder.class); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Parser PARSER = new io.prestosql.hadoop.$internal.com.google.protobuf.AbstractParser() { public SatisfyStoragePolicyResponseProto parsePartialFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return new SatisfyStoragePolicyResponseProto(input, extensionRegistry); } }; @java.lang.Override public io.prestosql.hadoop.$internal.com.google.protobuf.Parser getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { 
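// memoizedIsInitialized uses -1 as "not yet computed" and 1 as "verified
// initialized"; this response type declares no required fields, so the
// check below always succeeds and is cached after the first call.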
byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto)) { return super.equals(obj); } org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.ByteString data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto parseFrom(byte[] data) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto parseFrom( byte[] data, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto parseFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public 
static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto parseDelimitedFrom( java.io.InputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto parseFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code hadoop.hdfs.SatisfyStoragePolicyResponseProto} */ public static final class Builder extends io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProtoOrBuilder { public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SatisfyStoragePolicyResponseProto_descriptor; } protected io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SatisfyStoragePolicyResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto.Builder.class); } // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return 
create().mergeFrom(buildPartial()); } public io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_hadoop_hdfs_SatisfyStoragePolicyResponseProto_descriptor; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto.getDefaultInstance(); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto build() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto buildPartial() { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto(this); onBuilt(); return result; } public Builder mergeFrom(io.prestosql.hadoop.$internal.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto) { return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( io.prestosql.hadoop.$internal.com.google.protobuf.CodedInputStream input, io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (io.prestosql.hadoop.$internal.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SatisfyStoragePolicyResponseProto) } static { defaultInstance = new SatisfyStoragePolicyResponseProto(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:hadoop.hdfs.SatisfyStoragePolicyResponseProto) } /** * Protobuf service {@code hadoop.hdfs.ClientNamenodeProtocol} */ public static abstract class ClientNamenodeProtocol implements io.prestosql.hadoop.$internal.com.google.protobuf.Service { protected ClientNamenodeProtocol() {} public interface Interface { /** * rpc getBlockLocations(.hadoop.hdfs.GetBlockLocationsRequestProto) returns (.hadoop.hdfs.GetBlockLocationsResponseProto); */ public abstract void getBlockLocations( 
io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getServerDefaults(.hadoop.hdfs.GetServerDefaultsRequestProto) returns (.hadoop.hdfs.GetServerDefaultsResponseProto); */ public abstract void getServerDefaults( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc create(.hadoop.hdfs.CreateRequestProto) returns (.hadoop.hdfs.CreateResponseProto); */ public abstract void create( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc append(.hadoop.hdfs.AppendRequestProto) returns (.hadoop.hdfs.AppendResponseProto); */ public abstract void append( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc setReplication(.hadoop.hdfs.SetReplicationRequestProto) returns (.hadoop.hdfs.SetReplicationResponseProto); */ public abstract void setReplication( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc setStoragePolicy(.hadoop.hdfs.SetStoragePolicyRequestProto) returns (.hadoop.hdfs.SetStoragePolicyResponseProto); */ public abstract void setStoragePolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc unsetStoragePolicy(.hadoop.hdfs.UnsetStoragePolicyRequestProto) returns (.hadoop.hdfs.UnsetStoragePolicyResponseProto); */ public abstract void unsetStoragePolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getStoragePolicy(.hadoop.hdfs.GetStoragePolicyRequestProto) returns (.hadoop.hdfs.GetStoragePolicyResponseProto); */ public abstract void getStoragePolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getStoragePolicies(.hadoop.hdfs.GetStoragePoliciesRequestProto) returns (.hadoop.hdfs.GetStoragePoliciesResponseProto); */ public abstract void getStoragePolicies( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc 
setPermission(.hadoop.hdfs.SetPermissionRequestProto) returns (.hadoop.hdfs.SetPermissionResponseProto); */ public abstract void setPermission( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc setOwner(.hadoop.hdfs.SetOwnerRequestProto) returns (.hadoop.hdfs.SetOwnerResponseProto); */ public abstract void setOwner( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc abandonBlock(.hadoop.hdfs.AbandonBlockRequestProto) returns (.hadoop.hdfs.AbandonBlockResponseProto); */ public abstract void abandonBlock( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc addBlock(.hadoop.hdfs.AddBlockRequestProto) returns (.hadoop.hdfs.AddBlockResponseProto); */ public abstract void addBlock( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getAdditionalDatanode(.hadoop.hdfs.GetAdditionalDatanodeRequestProto) returns (.hadoop.hdfs.GetAdditionalDatanodeResponseProto); */ public abstract void getAdditionalDatanode( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc complete(.hadoop.hdfs.CompleteRequestProto) returns (.hadoop.hdfs.CompleteResponseProto); */ public abstract void complete( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc reportBadBlocks(.hadoop.hdfs.ReportBadBlocksRequestProto) returns (.hadoop.hdfs.ReportBadBlocksResponseProto); */ public abstract void reportBadBlocks( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc concat(.hadoop.hdfs.ConcatRequestProto) returns (.hadoop.hdfs.ConcatResponseProto); */ public abstract void concat( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc truncate(.hadoop.hdfs.TruncateRequestProto) returns (.hadoop.hdfs.TruncateResponseProto); */ public abstract void truncate( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc 
rename(.hadoop.hdfs.RenameRequestProto) returns (.hadoop.hdfs.RenameResponseProto); */ public abstract void rename( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc rename2(.hadoop.hdfs.Rename2RequestProto) returns (.hadoop.hdfs.Rename2ResponseProto); */ public abstract void rename2( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc delete(.hadoop.hdfs.DeleteRequestProto) returns (.hadoop.hdfs.DeleteResponseProto); */ public abstract void delete( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc mkdirs(.hadoop.hdfs.MkdirsRequestProto) returns (.hadoop.hdfs.MkdirsResponseProto); */ public abstract void mkdirs( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getListing(.hadoop.hdfs.GetListingRequestProto) returns (.hadoop.hdfs.GetListingResponseProto); */ public abstract void getListing( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc renewLease(.hadoop.hdfs.RenewLeaseRequestProto) returns (.hadoop.hdfs.RenewLeaseResponseProto); */ public abstract void renewLease( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc recoverLease(.hadoop.hdfs.RecoverLeaseRequestProto) returns (.hadoop.hdfs.RecoverLeaseResponseProto); */ public abstract void recoverLease( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getFsStats(.hadoop.hdfs.GetFsStatusRequestProto) returns (.hadoop.hdfs.GetFsStatsResponseProto); */ public abstract void getFsStats( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getFsReplicatedBlockStats(.hadoop.hdfs.GetFsReplicatedBlockStatsRequestProto) returns (.hadoop.hdfs.GetFsReplicatedBlockStatsResponseProto); */ public abstract void getFsReplicatedBlockStats( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc 
getFsECBlockGroupStats(.hadoop.hdfs.GetFsECBlockGroupStatsRequestProto) returns (.hadoop.hdfs.GetFsECBlockGroupStatsResponseProto); */ public abstract void getFsECBlockGroupStats( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getDatanodeReport(.hadoop.hdfs.GetDatanodeReportRequestProto) returns (.hadoop.hdfs.GetDatanodeReportResponseProto); */ public abstract void getDatanodeReport( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getDatanodeStorageReport(.hadoop.hdfs.GetDatanodeStorageReportRequestProto) returns (.hadoop.hdfs.GetDatanodeStorageReportResponseProto); */ public abstract void getDatanodeStorageReport( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getPreferredBlockSize(.hadoop.hdfs.GetPreferredBlockSizeRequestProto) returns (.hadoop.hdfs.GetPreferredBlockSizeResponseProto); */ public abstract void getPreferredBlockSize( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc setSafeMode(.hadoop.hdfs.SetSafeModeRequestProto) returns (.hadoop.hdfs.SetSafeModeResponseProto); */ public abstract void setSafeMode( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc saveNamespace(.hadoop.hdfs.SaveNamespaceRequestProto) returns (.hadoop.hdfs.SaveNamespaceResponseProto); */ public abstract void saveNamespace( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc rollEdits(.hadoop.hdfs.RollEditsRequestProto) returns (.hadoop.hdfs.RollEditsResponseProto); */ public abstract void rollEdits( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc restoreFailedStorage(.hadoop.hdfs.RestoreFailedStorageRequestProto) returns (.hadoop.hdfs.RestoreFailedStorageResponseProto); */ public abstract void restoreFailedStorage( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc refreshNodes(.hadoop.hdfs.RefreshNodesRequestProto) returns (.hadoop.hdfs.RefreshNodesResponseProto); */ public abstract void refreshNodes( 
io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc finalizeUpgrade(.hadoop.hdfs.FinalizeUpgradeRequestProto) returns (.hadoop.hdfs.FinalizeUpgradeResponseProto); */ public abstract void finalizeUpgrade( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc upgradeStatus(.hadoop.hdfs.UpgradeStatusRequestProto) returns (.hadoop.hdfs.UpgradeStatusResponseProto); */ public abstract void upgradeStatus( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc rollingUpgrade(.hadoop.hdfs.RollingUpgradeRequestProto) returns (.hadoop.hdfs.RollingUpgradeResponseProto); */ public abstract void rollingUpgrade( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc listCorruptFileBlocks(.hadoop.hdfs.ListCorruptFileBlocksRequestProto) returns (.hadoop.hdfs.ListCorruptFileBlocksResponseProto); */ public abstract void listCorruptFileBlocks( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc metaSave(.hadoop.hdfs.MetaSaveRequestProto) returns (.hadoop.hdfs.MetaSaveResponseProto); */ public abstract void metaSave( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getFileInfo(.hadoop.hdfs.GetFileInfoRequestProto) returns (.hadoop.hdfs.GetFileInfoResponseProto); */ public abstract void getFileInfo( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getLocatedFileInfo(.hadoop.hdfs.GetLocatedFileInfoRequestProto) returns (.hadoop.hdfs.GetLocatedFileInfoResponseProto); */ public abstract void getLocatedFileInfo( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc addCacheDirective(.hadoop.hdfs.AddCacheDirectiveRequestProto) returns (.hadoop.hdfs.AddCacheDirectiveResponseProto); */ public abstract void addCacheDirective( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc 
modifyCacheDirective(.hadoop.hdfs.ModifyCacheDirectiveRequestProto) returns (.hadoop.hdfs.ModifyCacheDirectiveResponseProto); */ public abstract void modifyCacheDirective( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc removeCacheDirective(.hadoop.hdfs.RemoveCacheDirectiveRequestProto) returns (.hadoop.hdfs.RemoveCacheDirectiveResponseProto); */ public abstract void removeCacheDirective( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc listCacheDirectives(.hadoop.hdfs.ListCacheDirectivesRequestProto) returns (.hadoop.hdfs.ListCacheDirectivesResponseProto); */ public abstract void listCacheDirectives( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc addCachePool(.hadoop.hdfs.AddCachePoolRequestProto) returns (.hadoop.hdfs.AddCachePoolResponseProto); */ public abstract void addCachePool( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc modifyCachePool(.hadoop.hdfs.ModifyCachePoolRequestProto) returns (.hadoop.hdfs.ModifyCachePoolResponseProto); */ public abstract void modifyCachePool( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc removeCachePool(.hadoop.hdfs.RemoveCachePoolRequestProto) returns (.hadoop.hdfs.RemoveCachePoolResponseProto); */ public abstract void removeCachePool( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc listCachePools(.hadoop.hdfs.ListCachePoolsRequestProto) returns (.hadoop.hdfs.ListCachePoolsResponseProto); */ public abstract void listCachePools( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getFileLinkInfo(.hadoop.hdfs.GetFileLinkInfoRequestProto) returns (.hadoop.hdfs.GetFileLinkInfoResponseProto); */ public abstract void getFileLinkInfo( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getContentSummary(.hadoop.hdfs.GetContentSummaryRequestProto) returns (.hadoop.hdfs.GetContentSummaryResponseProto); */ public abstract void getContentSummary( 
io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc setQuota(.hadoop.hdfs.SetQuotaRequestProto) returns (.hadoop.hdfs.SetQuotaResponseProto); */ public abstract void setQuota( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc fsync(.hadoop.hdfs.FsyncRequestProto) returns (.hadoop.hdfs.FsyncResponseProto); */ public abstract void fsync( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc setTimes(.hadoop.hdfs.SetTimesRequestProto) returns (.hadoop.hdfs.SetTimesResponseProto); */ public abstract void setTimes( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc createSymlink(.hadoop.hdfs.CreateSymlinkRequestProto) returns (.hadoop.hdfs.CreateSymlinkResponseProto); */ public abstract void createSymlink( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getLinkTarget(.hadoop.hdfs.GetLinkTargetRequestProto) returns (.hadoop.hdfs.GetLinkTargetResponseProto); */ public abstract void getLinkTarget( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc updateBlockForPipeline(.hadoop.hdfs.UpdateBlockForPipelineRequestProto) returns (.hadoop.hdfs.UpdateBlockForPipelineResponseProto); */ public abstract void updateBlockForPipeline( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc updatePipeline(.hadoop.hdfs.UpdatePipelineRequestProto) returns (.hadoop.hdfs.UpdatePipelineResponseProto); */ public abstract void updatePipeline( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getDelegationToken(.hadoop.common.GetDelegationTokenRequestProto) returns (.hadoop.common.GetDelegationTokenResponseProto); */ public abstract void getDelegationToken( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc renewDelegationToken(.hadoop.common.RenewDelegationTokenRequestProto) returns 
(.hadoop.common.RenewDelegationTokenResponseProto); */ public abstract void renewDelegationToken( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc cancelDelegationToken(.hadoop.common.CancelDelegationTokenRequestProto) returns (.hadoop.common.CancelDelegationTokenResponseProto); */ public abstract void cancelDelegationToken( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc setBalancerBandwidth(.hadoop.hdfs.SetBalancerBandwidthRequestProto) returns (.hadoop.hdfs.SetBalancerBandwidthResponseProto); */ public abstract void setBalancerBandwidth( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getDataEncryptionKey(.hadoop.hdfs.GetDataEncryptionKeyRequestProto) returns (.hadoop.hdfs.GetDataEncryptionKeyResponseProto); */ public abstract void getDataEncryptionKey( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc createSnapshot(.hadoop.hdfs.CreateSnapshotRequestProto) returns (.hadoop.hdfs.CreateSnapshotResponseProto); */ public abstract void createSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc renameSnapshot(.hadoop.hdfs.RenameSnapshotRequestProto) returns (.hadoop.hdfs.RenameSnapshotResponseProto); */ public abstract void renameSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc allowSnapshot(.hadoop.hdfs.AllowSnapshotRequestProto) returns (.hadoop.hdfs.AllowSnapshotResponseProto); */ public abstract void allowSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc disallowSnapshot(.hadoop.hdfs.DisallowSnapshotRequestProto) returns (.hadoop.hdfs.DisallowSnapshotResponseProto); */ public abstract void disallowSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getSnapshottableDirListing(.hadoop.hdfs.GetSnapshottableDirListingRequestProto) returns (.hadoop.hdfs.GetSnapshottableDirListingResponseProto); */ public abstract void getSnapshottableDirListing( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc deleteSnapshot(.hadoop.hdfs.DeleteSnapshotRequestProto) returns (.hadoop.hdfs.DeleteSnapshotResponseProto); */ public abstract void deleteSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getSnapshotDiffReport(.hadoop.hdfs.GetSnapshotDiffReportRequestProto) returns (.hadoop.hdfs.GetSnapshotDiffReportResponseProto); */ public abstract void getSnapshotDiffReport( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getSnapshotDiffReportListing(.hadoop.hdfs.GetSnapshotDiffReportListingRequestProto) returns (.hadoop.hdfs.GetSnapshotDiffReportListingResponseProto); */ public abstract void getSnapshotDiffReportListing( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc isFileClosed(.hadoop.hdfs.IsFileClosedRequestProto) returns (.hadoop.hdfs.IsFileClosedResponseProto); */ public abstract void isFileClosed( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc modifyAclEntries(.hadoop.hdfs.ModifyAclEntriesRequestProto) returns (.hadoop.hdfs.ModifyAclEntriesResponseProto); */ public abstract void modifyAclEntries( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc removeAclEntries(.hadoop.hdfs.RemoveAclEntriesRequestProto) returns (.hadoop.hdfs.RemoveAclEntriesResponseProto); */ public abstract void removeAclEntries( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc removeDefaultAcl(.hadoop.hdfs.RemoveDefaultAclRequestProto) returns (.hadoop.hdfs.RemoveDefaultAclResponseProto); */ public abstract void removeDefaultAcl( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc removeAcl(.hadoop.hdfs.RemoveAclRequestProto) returns (.hadoop.hdfs.RemoveAclResponseProto); */ public abstract void removeAcl( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc setAcl(.hadoop.hdfs.SetAclRequestProto) returns (.hadoop.hdfs.SetAclResponseProto); */ 
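// -------------------------------------------------------------------
// Wiring sketch (illustrative; assumes only the protobuf 2.5 service API
// this file is generated against): an Interface implementation can be
// wrapped via newReflectiveService and dispatched by method descriptor.
// `impl`, `controller`, `request`, and `done` are hypothetical names.
//
//   io.prestosql.hadoop.$internal.com.google.protobuf.Service svc =
//       ClientNamenodeProtocol.newReflectiveService(impl);
//   svc.callMethod(
//       svc.getDescriptorForType().findMethodByName("setAcl"),
//       controller,   // RpcController supplied by the RPC layer
//       request,      // a SetAclRequestProto
//       done);        // RpcCallback receiving the SetAclResponseProto
//
// callMethod routes to the matching abstract method declared below.
// -------------------------------------------------------------------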
public abstract void setAcl( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getAclStatus(.hadoop.hdfs.GetAclStatusRequestProto) returns (.hadoop.hdfs.GetAclStatusResponseProto); */ public abstract void getAclStatus( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc setXAttr(.hadoop.hdfs.SetXAttrRequestProto) returns (.hadoop.hdfs.SetXAttrResponseProto); */ public abstract void setXAttr( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getXAttrs(.hadoop.hdfs.GetXAttrsRequestProto) returns (.hadoop.hdfs.GetXAttrsResponseProto); */ public abstract void getXAttrs( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc listXAttrs(.hadoop.hdfs.ListXAttrsRequestProto) returns (.hadoop.hdfs.ListXAttrsResponseProto); */ public abstract void listXAttrs( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc removeXAttr(.hadoop.hdfs.RemoveXAttrRequestProto) returns (.hadoop.hdfs.RemoveXAttrResponseProto); */ public abstract void removeXAttr( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc checkAccess(.hadoop.hdfs.CheckAccessRequestProto) returns (.hadoop.hdfs.CheckAccessResponseProto); */ public abstract void checkAccess( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc createEncryptionZone(.hadoop.hdfs.CreateEncryptionZoneRequestProto) returns (.hadoop.hdfs.CreateEncryptionZoneResponseProto); */ public abstract void createEncryptionZone( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc listEncryptionZones(.hadoop.hdfs.ListEncryptionZonesRequestProto) returns (.hadoop.hdfs.ListEncryptionZonesResponseProto); */ public abstract void listEncryptionZones( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc reencryptEncryptionZone(.hadoop.hdfs.ReencryptEncryptionZoneRequestProto) returns (.hadoop.hdfs.ReencryptEncryptionZoneResponseProto); */ public abstract void reencryptEncryptionZone( 
io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptEncryptionZoneRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc listReencryptionStatus(.hadoop.hdfs.ListReencryptionStatusRequestProto) returns (.hadoop.hdfs.ListReencryptionStatusResponseProto); */ public abstract void listReencryptionStatus( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getEZForPath(.hadoop.hdfs.GetEZForPathRequestProto) returns (.hadoop.hdfs.GetEZForPathResponseProto); */ public abstract void getEZForPath( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc setErasureCodingPolicy(.hadoop.hdfs.SetErasureCodingPolicyRequestProto) returns (.hadoop.hdfs.SetErasureCodingPolicyResponseProto); */ public abstract void setErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc unsetErasureCodingPolicy(.hadoop.hdfs.UnsetErasureCodingPolicyRequestProto) returns (.hadoop.hdfs.UnsetErasureCodingPolicyResponseProto); */ public abstract void unsetErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getCurrentEditLogTxid(.hadoop.hdfs.GetCurrentEditLogTxidRequestProto) returns (.hadoop.hdfs.GetCurrentEditLogTxidResponseProto); */ public abstract void getCurrentEditLogTxid( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getEditsFromTxid(.hadoop.hdfs.GetEditsFromTxidRequestProto) returns (.hadoop.hdfs.GetEditsFromTxidResponseProto); */ public abstract void getEditsFromTxid( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getErasureCodingPolicies(.hadoop.hdfs.GetErasureCodingPoliciesRequestProto) returns (.hadoop.hdfs.GetErasureCodingPoliciesResponseProto); */ public abstract void getErasureCodingPolicies( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc addErasureCodingPolicies(.hadoop.hdfs.AddErasureCodingPoliciesRequestProto) returns (.hadoop.hdfs.AddErasureCodingPoliciesResponseProto); */ public abstract void addErasureCodingPolicies( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, 
org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc removeErasureCodingPolicy(.hadoop.hdfs.RemoveErasureCodingPolicyRequestProto) returns (.hadoop.hdfs.RemoveErasureCodingPolicyResponseProto); */ public abstract void removeErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc enableErasureCodingPolicy(.hadoop.hdfs.EnableErasureCodingPolicyRequestProto) returns (.hadoop.hdfs.EnableErasureCodingPolicyResponseProto); */ public abstract void enableErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.EnableErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc disableErasureCodingPolicy(.hadoop.hdfs.DisableErasureCodingPolicyRequestProto) returns (.hadoop.hdfs.DisableErasureCodingPolicyResponseProto); */ public abstract void disableErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.DisableErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getErasureCodingPolicy(.hadoop.hdfs.GetErasureCodingPolicyRequestProto) returns (.hadoop.hdfs.GetErasureCodingPolicyResponseProto); */ public abstract void getErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getErasureCodingCodecs(.hadoop.hdfs.GetErasureCodingCodecsRequestProto) returns (.hadoop.hdfs.GetErasureCodingCodecsResponseProto); */ public abstract void getErasureCodingCodecs( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getQuotaUsage(.hadoop.hdfs.GetQuotaUsageRequestProto) returns (.hadoop.hdfs.GetQuotaUsageResponseProto); */ public abstract void getQuotaUsage( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc listOpenFiles(.hadoop.hdfs.ListOpenFilesRequestProto) returns (.hadoop.hdfs.ListOpenFilesResponseProto); */ public abstract void listOpenFiles( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc satisfyStoragePolicy(.hadoop.hdfs.SatisfyStoragePolicyRequestProto) returns (.hadoop.hdfs.SatisfyStoragePolicyResponseProto); */ public abstract void satisfyStoragePolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); } public static io.prestosql.hadoop.$internal.com.google.protobuf.Service newReflectiveService( final Interface impl) { return new ClientNamenodeProtocol() { @java.lang.Override public void getBlockLocations( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getBlockLocations(controller, request, done); } @java.lang.Override public void getServerDefaults( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getServerDefaults(controller, request, done); } @java.lang.Override public void create( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.create(controller, request, done); } @java.lang.Override public void append( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.append(controller, request, done); } @java.lang.Override public void setReplication( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.setReplication(controller, request, done); } @java.lang.Override public void setStoragePolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.setStoragePolicy(controller, request, done); } @java.lang.Override public void unsetStoragePolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.unsetStoragePolicy(controller, request, done); } @java.lang.Override public void getStoragePolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getStoragePolicy(controller, request, done); } @java.lang.Override public void getStoragePolicies( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getStoragePolicies(controller, request, done); } @java.lang.Override public void setPermission( 
io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.setPermission(controller, request, done); } @java.lang.Override public void setOwner( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.setOwner(controller, request, done); } @java.lang.Override public void abandonBlock( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.abandonBlock(controller, request, done); } @java.lang.Override public void addBlock( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.addBlock(controller, request, done); } @java.lang.Override public void getAdditionalDatanode( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getAdditionalDatanode(controller, request, done); } @java.lang.Override public void complete( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.complete(controller, request, done); } @java.lang.Override public void reportBadBlocks( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.reportBadBlocks(controller, request, done); } @java.lang.Override public void concat( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.concat(controller, request, done); } @java.lang.Override public void truncate( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.truncate(controller, request, done); } @java.lang.Override public void rename( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.rename(controller, request, done); } @java.lang.Override public void rename2( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto request, 
io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.rename2(controller, request, done); } @java.lang.Override public void delete( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.delete(controller, request, done); } @java.lang.Override public void mkdirs( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.mkdirs(controller, request, done); } @java.lang.Override public void getListing( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getListing(controller, request, done); } @java.lang.Override public void renewLease( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.renewLease(controller, request, done); } @java.lang.Override public void recoverLease( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.recoverLease(controller, request, done); } @java.lang.Override public void getFsStats( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getFsStats(controller, request, done); } @java.lang.Override public void getFsReplicatedBlockStats( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getFsReplicatedBlockStats(controller, request, done); } @java.lang.Override public void getFsECBlockGroupStats( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getFsECBlockGroupStats(controller, request, done); } @java.lang.Override public void getDatanodeReport( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getDatanodeReport(controller, request, done); } @java.lang.Override public void getDatanodeStorageReport( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { 
impl.getDatanodeStorageReport(controller, request, done); } @java.lang.Override public void getPreferredBlockSize( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getPreferredBlockSize(controller, request, done); } @java.lang.Override public void setSafeMode( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.setSafeMode(controller, request, done); } @java.lang.Override public void saveNamespace( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.saveNamespace(controller, request, done); } @java.lang.Override public void rollEdits( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.rollEdits(controller, request, done); } @java.lang.Override public void restoreFailedStorage( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.restoreFailedStorage(controller, request, done); } @java.lang.Override public void refreshNodes( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.refreshNodes(controller, request, done); } @java.lang.Override public void finalizeUpgrade( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.finalizeUpgrade(controller, request, done); } @java.lang.Override public void upgradeStatus( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.upgradeStatus(controller, request, done); } @java.lang.Override public void rollingUpgrade( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.rollingUpgrade(controller, request, done); } @java.lang.Override public void listCorruptFileBlocks( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.listCorruptFileBlocks(controller, request, done); 
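// Every override in this reflective wrapper has the same shape: it forwards
// controller, request, and the done callback unchanged to the caller-supplied
// Interface implementation, adding no logic of its own.
//
// A minimal usage sketch (illustrative only, not part of the generated file;
// MyNamenodeHandler is a hypothetical class implementing
// ClientNamenodeProtocol.Interface):
//
//   ClientNamenodeProtocol.Interface impl = new MyNamenodeHandler();
//   io.prestosql.hadoop.$internal.com.google.protobuf.Service service =
//       ClientNamenodeProtocol.newReflectiveService(impl);
//
// callMethod(...) on the returned Service then dispatches to the matching
// override here by method index.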
} @java.lang.Override public void metaSave( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.metaSave(controller, request, done); } @java.lang.Override public void getFileInfo( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getFileInfo(controller, request, done); } @java.lang.Override public void getLocatedFileInfo( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getLocatedFileInfo(controller, request, done); } @java.lang.Override public void addCacheDirective( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.addCacheDirective(controller, request, done); } @java.lang.Override public void modifyCacheDirective( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.modifyCacheDirective(controller, request, done); } @java.lang.Override public void removeCacheDirective( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.removeCacheDirective(controller, request, done); } @java.lang.Override public void listCacheDirectives( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.listCacheDirectives(controller, request, done); } @java.lang.Override public void addCachePool( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.addCachePool(controller, request, done); } @java.lang.Override public void modifyCachePool( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.modifyCachePool(controller, request, done); } @java.lang.Override public void removeCachePool( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.removeCachePool(controller, request, done); } @java.lang.Override public void 
listCachePools( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.listCachePools(controller, request, done); } @java.lang.Override public void getFileLinkInfo( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getFileLinkInfo(controller, request, done); } @java.lang.Override public void getContentSummary( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getContentSummary(controller, request, done); } @java.lang.Override public void setQuota( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.setQuota(controller, request, done); } @java.lang.Override public void fsync( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.fsync(controller, request, done); } @java.lang.Override public void setTimes( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.setTimes(controller, request, done); } @java.lang.Override public void createSymlink( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.createSymlink(controller, request, done); } @java.lang.Override public void getLinkTarget( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getLinkTarget(controller, request, done); } @java.lang.Override public void updateBlockForPipeline( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.updateBlockForPipeline(controller, request, done); } @java.lang.Override public void updatePipeline( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.updatePipeline(controller, request, done); } @java.lang.Override public void getDelegationToken( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, 
org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getDelegationToken(controller, request, done); } @java.lang.Override public void renewDelegationToken( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.renewDelegationToken(controller, request, done); } @java.lang.Override public void cancelDelegationToken( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.cancelDelegationToken(controller, request, done); } @java.lang.Override public void setBalancerBandwidth( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.setBalancerBandwidth(controller, request, done); } @java.lang.Override public void getDataEncryptionKey( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getDataEncryptionKey(controller, request, done); } @java.lang.Override public void createSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.createSnapshot(controller, request, done); } @java.lang.Override public void renameSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.renameSnapshot(controller, request, done); } @java.lang.Override public void allowSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.allowSnapshot(controller, request, done); } @java.lang.Override public void disallowSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.disallowSnapshot(controller, request, done); } @java.lang.Override public void getSnapshottableDirListing( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getSnapshottableDirListing(controller, request, done); } @java.lang.Override public void deleteSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.deleteSnapshot(controller, request, done); } @java.lang.Override public void getSnapshotDiffReport( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getSnapshotDiffReport(controller, request, done); } @java.lang.Override public void getSnapshotDiffReportListing( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getSnapshotDiffReportListing(controller, request, done); } @java.lang.Override public void isFileClosed( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.isFileClosed(controller, request, done); } @java.lang.Override public void modifyAclEntries( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.modifyAclEntries(controller, request, done); } @java.lang.Override public void removeAclEntries( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.removeAclEntries(controller, request, done); } @java.lang.Override public void removeDefaultAcl( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.removeDefaultAcl(controller, request, done); } @java.lang.Override public void removeAcl( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.removeAcl(controller, request, done); } @java.lang.Override public void setAcl( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.setAcl(controller, request, done); } @java.lang.Override public void getAclStatus( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getAclStatus(controller, request, done); } @java.lang.Override public void setXAttr( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { 
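// Note: the ACL and extended-attribute RPCs above and below take their request
// messages from the separately generated AclProtos and XAttrProtos classes
// (from acl.proto and xattr.proto), and the delegation-token RPCs use
// SecurityProtos, while most other methods use messages defined in
// ClientNamenodeProtocolProtos itself.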
impl.setXAttr(controller, request, done); } @java.lang.Override public void getXAttrs( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getXAttrs(controller, request, done); } @java.lang.Override public void listXAttrs( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.listXAttrs(controller, request, done); } @java.lang.Override public void removeXAttr( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.removeXAttr(controller, request, done); } @java.lang.Override public void checkAccess( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.checkAccess(controller, request, done); } @java.lang.Override public void createEncryptionZone( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.createEncryptionZone(controller, request, done); } @java.lang.Override public void listEncryptionZones( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.listEncryptionZones(controller, request, done); } @java.lang.Override public void reencryptEncryptionZone( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptEncryptionZoneRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.reencryptEncryptionZone(controller, request, done); } @java.lang.Override public void listReencryptionStatus( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.listReencryptionStatus(controller, request, done); } @java.lang.Override public void getEZForPath( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getEZForPath(controller, request, done); } @java.lang.Override public void setErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.setErasureCodingPolicy(controller, request, done); } @java.lang.Override public void unsetErasureCodingPolicy( 
io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.unsetErasureCodingPolicy(controller, request, done); } @java.lang.Override public void getCurrentEditLogTxid( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getCurrentEditLogTxid(controller, request, done); } @java.lang.Override public void getEditsFromTxid( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getEditsFromTxid(controller, request, done); } @java.lang.Override public void getErasureCodingPolicies( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getErasureCodingPolicies(controller, request, done); } @java.lang.Override public void addErasureCodingPolicies( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.addErasureCodingPolicies(controller, request, done); } @java.lang.Override public void removeErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.removeErasureCodingPolicy(controller, request, done); } @java.lang.Override public void enableErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.EnableErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.enableErasureCodingPolicy(controller, request, done); } @java.lang.Override public void disableErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.DisableErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.disableErasureCodingPolicy(controller, request, done); } @java.lang.Override public void getErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getErasureCodingPolicy(controller, request, done); } @java.lang.Override public void getErasureCodingCodecs( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { 
impl.getErasureCodingCodecs(controller, request, done); } @java.lang.Override public void getQuotaUsage( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.getQuotaUsage(controller, request, done); } @java.lang.Override public void listOpenFiles( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.listOpenFiles(controller, request, done); } @java.lang.Override public void satisfyStoragePolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { impl.satisfyStoragePolicy(controller, request, done); } }; } public static io.prestosql.hadoop.$internal.com.google.protobuf.BlockingService newReflectiveBlockingService(final BlockingInterface impl) { return new io.prestosql.hadoop.$internal.com.google.protobuf.BlockingService() { public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.ServiceDescriptor getDescriptorForType() { return getDescriptor(); } public final io.prestosql.hadoop.$internal.com.google.protobuf.Message callBlockingMethod( io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.MethodDescriptor method, io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, io.prestosql.hadoop.$internal.com.google.protobuf.Message request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.callBlockingMethod() given method descriptor for " + "wrong service type."); } switch(method.getIndex()) { case 0: return impl.getBlockLocations(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto)request); case 1: return impl.getServerDefaults(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto)request); case 2: return impl.create(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto)request); case 3: return impl.append(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto)request); case 4: return impl.setReplication(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto)request); case 5: return impl.setStoragePolicy(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto)request); case 6: return impl.unsetStoragePolicy(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto)request); case 7: return impl.getStoragePolicy(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto)request); case 8: return impl.getStoragePolicies(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto)request); case 9: return impl.setPermission(controller, 
(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto)request); case 10: return impl.setOwner(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto)request); case 11: return impl.abandonBlock(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto)request); case 12: return impl.addBlock(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto)request); case 13: return impl.getAdditionalDatanode(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto)request); case 14: return impl.complete(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto)request); case 15: return impl.reportBadBlocks(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto)request); case 16: return impl.concat(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto)request); case 17: return impl.truncate(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto)request); case 18: return impl.rename(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto)request); case 19: return impl.rename2(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto)request); case 20: return impl.delete(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto)request); case 21: return impl.mkdirs(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto)request); case 22: return impl.getListing(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto)request); case 23: return impl.renewLease(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto)request); case 24: return impl.recoverLease(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto)request); case 25: return impl.getFsStats(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto)request); case 26: return impl.getFsReplicatedBlockStats(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto)request); case 27: return impl.getFsECBlockGroupStats(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto)request); case 28: return impl.getDatanodeReport(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto)request); case 29: return impl.getDatanodeStorageReport(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto)request); case 30: return impl.getPreferredBlockSize(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto)request); case 31: return impl.setSafeMode(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto)request); case 32: return impl.saveNamespace(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto)request); 
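// Dispatch continues in declaration order: method.getIndex() is the zero-based
// position of the rpc in ClientNamenodeProtocol.proto, and each case
// down-casts the request to its concrete *RequestProto before delegating to
// the caller-supplied BlockingInterface implementation.
//
// A server-side usage sketch (illustrative only; MyBlockingHandler is a
// hypothetical class implementing ClientNamenodeProtocol.BlockingInterface):
//
//   io.prestosql.hadoop.$internal.com.google.protobuf.BlockingService svc =
//       ClientNamenodeProtocol.newReflectiveBlockingService(
//           new MyBlockingHandler());
//
// An RPC server (for example Hadoop's protobuf RPC engine) can register such a
// BlockingService and route each incoming call through callBlockingMethod.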
case 33: return impl.rollEdits(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto)request); case 34: return impl.restoreFailedStorage(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto)request); case 35: return impl.refreshNodes(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto)request); case 36: return impl.finalizeUpgrade(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto)request); case 37: return impl.upgradeStatus(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto)request); case 38: return impl.rollingUpgrade(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto)request); case 39: return impl.listCorruptFileBlocks(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto)request); case 40: return impl.metaSave(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto)request); case 41: return impl.getFileInfo(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto)request); case 42: return impl.getLocatedFileInfo(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto)request); case 43: return impl.addCacheDirective(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto)request); case 44: return impl.modifyCacheDirective(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto)request); case 45: return impl.removeCacheDirective(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto)request); case 46: return impl.listCacheDirectives(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto)request); case 47: return impl.addCachePool(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto)request); case 48: return impl.modifyCachePool(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto)request); case 49: return impl.removeCachePool(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto)request); case 50: return impl.listCachePools(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto)request); case 51: return impl.getFileLinkInfo(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto)request); case 52: return impl.getContentSummary(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto)request); case 53: return impl.setQuota(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto)request); case 54: return impl.fsync(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto)request); case 55: return impl.setTimes(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto)request); case 56: return 
impl.createSymlink(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto)request); case 57: return impl.getLinkTarget(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto)request); case 58: return impl.updateBlockForPipeline(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto)request); case 59: return impl.updatePipeline(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto)request); case 60: return impl.getDelegationToken(controller, (org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto)request); case 61: return impl.renewDelegationToken(controller, (org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto)request); case 62: return impl.cancelDelegationToken(controller, (org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto)request); case 63: return impl.setBalancerBandwidth(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto)request); case 64: return impl.getDataEncryptionKey(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto)request); case 65: return impl.createSnapshot(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto)request); case 66: return impl.renameSnapshot(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto)request); case 67: return impl.allowSnapshot(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto)request); case 68: return impl.disallowSnapshot(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto)request); case 69: return impl.getSnapshottableDirListing(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto)request); case 70: return impl.deleteSnapshot(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto)request); case 71: return impl.getSnapshotDiffReport(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto)request); case 72: return impl.getSnapshotDiffReportListing(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto)request); case 73: return impl.isFileClosed(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto)request); case 74: return impl.modifyAclEntries(controller, (org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto)request); case 75: return impl.removeAclEntries(controller, (org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesRequestProto)request); case 76: return impl.removeDefaultAcl(controller, (org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclRequestProto)request); case 77: return impl.removeAcl(controller, (org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclRequestProto)request); case 78: return impl.setAcl(controller, (org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclRequestProto)request); case 79: return impl.getAclStatus(controller, 
(org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto)request); case 80: return impl.setXAttr(controller, (org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto)request); case 81: return impl.getXAttrs(controller, (org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto)request); case 82: return impl.listXAttrs(controller, (org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto)request); case 83: return impl.removeXAttr(controller, (org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto)request); case 84: return impl.checkAccess(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto)request); case 85: return impl.createEncryptionZone(controller, (org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto)request); case 86: return impl.listEncryptionZones(controller, (org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto)request); case 87: return impl.reencryptEncryptionZone(controller, (org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptEncryptionZoneRequestProto)request); case 88: return impl.listReencryptionStatus(controller, (org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusRequestProto)request); case 89: return impl.getEZForPath(controller, (org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto)request); case 90: return impl.setErasureCodingPolicy(controller, (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto)request); case 91: return impl.unsetErasureCodingPolicy(controller, (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyRequestProto)request); case 92: return impl.getCurrentEditLogTxid(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto)request); case 93: return impl.getEditsFromTxid(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto)request); case 94: return impl.getErasureCodingPolicies(controller, (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto)request); case 95: return impl.addErasureCodingPolicies(controller, (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesRequestProto)request); case 96: return impl.removeErasureCodingPolicy(controller, (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyRequestProto)request); case 97: return impl.enableErasureCodingPolicy(controller, (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.EnableErasureCodingPolicyRequestProto)request); case 98: return impl.disableErasureCodingPolicy(controller, (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.DisableErasureCodingPolicyRequestProto)request); case 99: return impl.getErasureCodingPolicy(controller, (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyRequestProto)request); case 100: return impl.getErasureCodingCodecs(controller, (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsRequestProto)request); case 101: return impl.getQuotaUsage(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto)request); case 102: return impl.listOpenFiles(controller, 
(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto)request); case 103: return impl.satisfyStoragePolicy(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto)request); default: throw new java.lang.AssertionError("Can't get here."); } } public final io.prestosql.hadoop.$internal.com.google.protobuf.Message getRequestPrototype( io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.MethodDescriptor method) { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.getRequestPrototype() given method " + "descriptor for wrong service type."); } switch(method.getIndex()) { case 0: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto.getDefaultInstance(); case 1: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto.getDefaultInstance(); case 2: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto.getDefaultInstance(); case 3: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto.getDefaultInstance(); case 4: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto.getDefaultInstance(); case 5: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto.getDefaultInstance(); case 6: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto.getDefaultInstance(); case 7: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto.getDefaultInstance(); case 8: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto.getDefaultInstance(); case 9: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto.getDefaultInstance(); case 10: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto.getDefaultInstance(); case 11: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto.getDefaultInstance(); case 12: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto.getDefaultInstance(); case 13: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto.getDefaultInstance(); case 14: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto.getDefaultInstance(); case 15: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance(); case 16: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto.getDefaultInstance(); case 17: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto.getDefaultInstance(); case 18: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto.getDefaultInstance(); case 19: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto.getDefaultInstance(); case 20: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto.getDefaultInstance(); case 21: return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto.getDefaultInstance(); case 22: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto.getDefaultInstance(); case 23: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto.getDefaultInstance(); case 24: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto.getDefaultInstance(); case 25: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto.getDefaultInstance(); case 26: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto.getDefaultInstance(); case 27: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto.getDefaultInstance(); case 28: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto.getDefaultInstance(); case 29: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto.getDefaultInstance(); case 30: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto.getDefaultInstance(); case 31: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto.getDefaultInstance(); case 32: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto.getDefaultInstance(); case 33: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto.getDefaultInstance(); case 34: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto.getDefaultInstance(); case 35: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto.getDefaultInstance(); case 36: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto.getDefaultInstance(); case 37: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto.getDefaultInstance(); case 38: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto.getDefaultInstance(); case 39: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto.getDefaultInstance(); case 40: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto.getDefaultInstance(); case 41: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto.getDefaultInstance(); case 42: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto.getDefaultInstance(); case 43: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto.getDefaultInstance(); case 44: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto.getDefaultInstance(); case 45: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto.getDefaultInstance(); case 46: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto.getDefaultInstance(); case 47: return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto.getDefaultInstance(); case 48: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto.getDefaultInstance(); case 49: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto.getDefaultInstance(); case 50: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto.getDefaultInstance(); case 51: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto.getDefaultInstance(); case 52: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto.getDefaultInstance(); case 53: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto.getDefaultInstance(); case 54: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto.getDefaultInstance(); case 55: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto.getDefaultInstance(); case 56: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto.getDefaultInstance(); case 57: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto.getDefaultInstance(); case 58: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto.getDefaultInstance(); case 59: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto.getDefaultInstance(); case 60: return org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto.getDefaultInstance(); case 61: return org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto.getDefaultInstance(); case 62: return org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto.getDefaultInstance(); case 63: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto.getDefaultInstance(); case 64: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto.getDefaultInstance(); case 65: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto.getDefaultInstance(); case 66: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto.getDefaultInstance(); case 67: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto.getDefaultInstance(); case 68: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto.getDefaultInstance(); case 69: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto.getDefaultInstance(); case 70: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto.getDefaultInstance(); case 71: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto.getDefaultInstance(); case 72: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto.getDefaultInstance(); case 73: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto.getDefaultInstance(); case 74: 
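// Cases 74-83 below return request types generated from Hadoop's separate
// acl.proto and xattr.proto files (AclProtos and XAttrProtos), so those
// messages live outside this class even though the rpcs belong to this
// service.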
return org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto.getDefaultInstance(); case 75: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesRequestProto.getDefaultInstance(); case 76: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclRequestProto.getDefaultInstance(); case 77: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclRequestProto.getDefaultInstance(); case 78: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclRequestProto.getDefaultInstance(); case 79: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto.getDefaultInstance(); case 80: return org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto.getDefaultInstance(); case 81: return org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto.getDefaultInstance(); case 82: return org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto.getDefaultInstance(); case 83: return org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto.getDefaultInstance(); case 84: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto.getDefaultInstance(); case 85: return org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto.getDefaultInstance(); case 86: return org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto.getDefaultInstance(); case 87: return org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptEncryptionZoneRequestProto.getDefaultInstance(); case 88: return org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusRequestProto.getDefaultInstance(); case 89: return org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto.getDefaultInstance(); case 90: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto.getDefaultInstance(); case 91: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyRequestProto.getDefaultInstance(); case 92: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto.getDefaultInstance(); case 93: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto.getDefaultInstance(); case 94: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto.getDefaultInstance(); case 95: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesRequestProto.getDefaultInstance(); case 96: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyRequestProto.getDefaultInstance(); case 97: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.EnableErasureCodingPolicyRequestProto.getDefaultInstance(); case 98: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.DisableErasureCodingPolicyRequestProto.getDefaultInstance(); case 99: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyRequestProto.getDefaultInstance(); case 100: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsRequestProto.getDefaultInstance(); case 101: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto.getDefaultInstance(); case 102: return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto.getDefaultInstance(); case 103: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } } public final io.prestosql.hadoop.$internal.com.google.protobuf.Message getResponsePrototype( io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.MethodDescriptor method) { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.getResponsePrototype() given method " + "descriptor for wrong service type."); } switch(method.getIndex()) { case 0: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.getDefaultInstance(); case 1: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.getDefaultInstance(); case 2: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.getDefaultInstance(); case 3: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.getDefaultInstance(); case 4: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.getDefaultInstance(); case 5: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto.getDefaultInstance(); case 6: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto.getDefaultInstance(); case 7: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto.getDefaultInstance(); case 8: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto.getDefaultInstance(); case 9: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.getDefaultInstance(); case 10: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.getDefaultInstance(); case 11: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto.getDefaultInstance(); case 12: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto.getDefaultInstance(); case 13: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto.getDefaultInstance(); case 14: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto.getDefaultInstance(); case 15: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance(); case 16: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.getDefaultInstance(); case 17: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto.getDefaultInstance(); case 18: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.getDefaultInstance(); case 19: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.getDefaultInstance(); case 20: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.getDefaultInstance(); case 21: return 
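// getResponsePrototype mirrors getRequestPrototype with the same
// index-to-rpc mapping. Note the asymmetric naming at index 25: the request
// type is GetFsStatusRequestProto while the response type is
// GetFsStatsResponseProto, matching the declaration
// rpc getFsStats(GetFsStatusRequestProto) returns (GetFsStatsResponseProto).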
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.getDefaultInstance(); case 22: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.getDefaultInstance(); case 23: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.getDefaultInstance(); case 24: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.getDefaultInstance(); case 25: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.getDefaultInstance(); case 26: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto.getDefaultInstance(); case 27: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto.getDefaultInstance(); case 28: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.getDefaultInstance(); case 29: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto.getDefaultInstance(); case 30: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.getDefaultInstance(); case 31: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.getDefaultInstance(); case 32: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.getDefaultInstance(); case 33: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto.getDefaultInstance(); case 34: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.getDefaultInstance(); case 35: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.getDefaultInstance(); case 36: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.getDefaultInstance(); case 37: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto.getDefaultInstance(); case 38: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto.getDefaultInstance(); case 39: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.getDefaultInstance(); case 40: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.getDefaultInstance(); case 41: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.getDefaultInstance(); case 42: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto.getDefaultInstance(); case 43: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto.getDefaultInstance(); case 44: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto.getDefaultInstance(); case 45: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto.getDefaultInstance(); case 46: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto.getDefaultInstance(); case 47: return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto.getDefaultInstance(); case 48: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto.getDefaultInstance(); case 49: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto.getDefaultInstance(); case 50: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto.getDefaultInstance(); case 51: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.getDefaultInstance(); case 52: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.getDefaultInstance(); case 53: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.getDefaultInstance(); case 54: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.getDefaultInstance(); case 55: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.getDefaultInstance(); case 56: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.getDefaultInstance(); case 57: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.getDefaultInstance(); case 58: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto.getDefaultInstance(); case 59: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.getDefaultInstance(); case 60: return org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto.getDefaultInstance(); case 61: return org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto.getDefaultInstance(); case 62: return org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto.getDefaultInstance(); case 63: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.getDefaultInstance(); case 64: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto.getDefaultInstance(); case 65: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto.getDefaultInstance(); case 66: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto.getDefaultInstance(); case 67: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto.getDefaultInstance(); case 68: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto.getDefaultInstance(); case 69: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto.getDefaultInstance(); case 70: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto.getDefaultInstance(); case 71: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto.getDefaultInstance(); case 72: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto.getDefaultInstance(); case 73: return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto.getDefaultInstance(); case 74: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesResponseProto.getDefaultInstance(); case 75: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesResponseProto.getDefaultInstance(); case 76: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclResponseProto.getDefaultInstance(); case 77: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclResponseProto.getDefaultInstance(); case 78: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclResponseProto.getDefaultInstance(); case 79: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto.getDefaultInstance(); case 80: return org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrResponseProto.getDefaultInstance(); case 81: return org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto.getDefaultInstance(); case 82: return org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsResponseProto.getDefaultInstance(); case 83: return org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrResponseProto.getDefaultInstance(); case 84: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto.getDefaultInstance(); case 85: return org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneResponseProto.getDefaultInstance(); case 86: return org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto.getDefaultInstance(); case 87: return org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptEncryptionZoneResponseProto.getDefaultInstance(); case 88: return org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusResponseProto.getDefaultInstance(); case 89: return org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathResponseProto.getDefaultInstance(); case 90: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyResponseProto.getDefaultInstance(); case 91: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyResponseProto.getDefaultInstance(); case 92: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto.getDefaultInstance(); case 93: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto.getDefaultInstance(); case 94: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto.getDefaultInstance(); case 95: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesResponseProto.getDefaultInstance(); case 96: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyResponseProto.getDefaultInstance(); case 97: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.EnableErasureCodingPolicyResponseProto.getDefaultInstance(); case 98: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.DisableErasureCodingPolicyResponseProto.getDefaultInstance(); case 99: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyResponseProto.getDefaultInstance(); case 100: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsResponseProto.getDefaultInstance(); case 101: return 
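// Together with callMethod, the two prototype lookups are enough to build a
// generic dispatcher that never names a concrete message type. A sketch,
// where `service`, `method`, `controller`, `reqBytes`, and `sendBytes` are
// placeholders:
//
//   Message request = service.getRequestPrototype(method)
//       .newBuilderForType().mergeFrom(reqBytes).build();
//   service.callMethod(method, controller, request,
//       response -> sendBytes.accept(response.toByteArray()));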
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto.getDefaultInstance(); case 102: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto.getDefaultInstance(); case 103: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } } }; } /** * rpc getBlockLocations(.hadoop.hdfs.GetBlockLocationsRequestProto) returns (.hadoop.hdfs.GetBlockLocationsResponseProto); */ public abstract void getBlockLocations( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getServerDefaults(.hadoop.hdfs.GetServerDefaultsRequestProto) returns (.hadoop.hdfs.GetServerDefaultsResponseProto); */ public abstract void getServerDefaults( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc create(.hadoop.hdfs.CreateRequestProto) returns (.hadoop.hdfs.CreateResponseProto); */ public abstract void create( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc append(.hadoop.hdfs.AppendRequestProto) returns (.hadoop.hdfs.AppendResponseProto); */ public abstract void append( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc setReplication(.hadoop.hdfs.SetReplicationRequestProto) returns (.hadoop.hdfs.SetReplicationResponseProto); */ public abstract void setReplication( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc setStoragePolicy(.hadoop.hdfs.SetStoragePolicyRequestProto) returns (.hadoop.hdfs.SetStoragePolicyResponseProto); */ public abstract void setStoragePolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc unsetStoragePolicy(.hadoop.hdfs.UnsetStoragePolicyRequestProto) returns (.hadoop.hdfs.UnsetStoragePolicyResponseProto); */ public abstract void unsetStoragePolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getStoragePolicy(.hadoop.hdfs.GetStoragePolicyRequestProto) returns (.hadoop.hdfs.GetStoragePolicyResponseProto); */ public abstract void getStoragePolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, 
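// Implementation note: every rpc below follows the same callback shape
// (controller, request, done). An illustrative subclass override, with `ok`
// a placeholder boolean (RpcCallback is parameterized by the response type
// in the generated signatures):
//
//   @Override
//   public void mkdirs(RpcController controller,
//       MkdirsRequestProto request,
//       RpcCallback<MkdirsResponseProto> done) {
//     done.run(MkdirsResponseProto.newBuilder().setResult(ok).build());
//   }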
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getStoragePolicies(.hadoop.hdfs.GetStoragePoliciesRequestProto) returns (.hadoop.hdfs.GetStoragePoliciesResponseProto); */ public abstract void getStoragePolicies( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc setPermission(.hadoop.hdfs.SetPermissionRequestProto) returns (.hadoop.hdfs.SetPermissionResponseProto); */ public abstract void setPermission( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc setOwner(.hadoop.hdfs.SetOwnerRequestProto) returns (.hadoop.hdfs.SetOwnerResponseProto); */ public abstract void setOwner( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc abandonBlock(.hadoop.hdfs.AbandonBlockRequestProto) returns (.hadoop.hdfs.AbandonBlockResponseProto); */ public abstract void abandonBlock( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc addBlock(.hadoop.hdfs.AddBlockRequestProto) returns (.hadoop.hdfs.AddBlockResponseProto); */ public abstract void addBlock( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getAdditionalDatanode(.hadoop.hdfs.GetAdditionalDatanodeRequestProto) returns (.hadoop.hdfs.GetAdditionalDatanodeResponseProto); */ public abstract void getAdditionalDatanode( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc complete(.hadoop.hdfs.CompleteRequestProto) returns (.hadoop.hdfs.CompleteResponseProto); */ public abstract void complete( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc reportBadBlocks(.hadoop.hdfs.ReportBadBlocksRequestProto) returns (.hadoop.hdfs.ReportBadBlocksResponseProto); */ public abstract void reportBadBlocks( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc concat(.hadoop.hdfs.ConcatRequestProto) returns (.hadoop.hdfs.ConcatResponseProto); */ public abstract void concat( 
io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc truncate(.hadoop.hdfs.TruncateRequestProto) returns (.hadoop.hdfs.TruncateResponseProto); */ public abstract void truncate( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc rename(.hadoop.hdfs.RenameRequestProto) returns (.hadoop.hdfs.RenameResponseProto); */ public abstract void rename( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc rename2(.hadoop.hdfs.Rename2RequestProto) returns (.hadoop.hdfs.Rename2ResponseProto); */ public abstract void rename2( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc delete(.hadoop.hdfs.DeleteRequestProto) returns (.hadoop.hdfs.DeleteResponseProto); */ public abstract void delete( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc mkdirs(.hadoop.hdfs.MkdirsRequestProto) returns (.hadoop.hdfs.MkdirsResponseProto); */ public abstract void mkdirs( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getListing(.hadoop.hdfs.GetListingRequestProto) returns (.hadoop.hdfs.GetListingResponseProto); */ public abstract void getListing( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc renewLease(.hadoop.hdfs.RenewLeaseRequestProto) returns (.hadoop.hdfs.RenewLeaseResponseProto); */ public abstract void renewLease( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc recoverLease(.hadoop.hdfs.RecoverLeaseRequestProto) returns (.hadoop.hdfs.RecoverLeaseResponseProto); */ public abstract void recoverLease( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getFsStats(.hadoop.hdfs.GetFsStatusRequestProto) returns (.hadoop.hdfs.GetFsStatsResponseProto); */ public abstract void getFsStats( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getFsReplicatedBlockStats(.hadoop.hdfs.GetFsReplicatedBlockStatsRequestProto) returns (.hadoop.hdfs.GetFsReplicatedBlockStatsResponseProto); */ public abstract void getFsReplicatedBlockStats( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getFsECBlockGroupStats(.hadoop.hdfs.GetFsECBlockGroupStatsRequestProto) returns (.hadoop.hdfs.GetFsECBlockGroupStatsResponseProto); */ public abstract void getFsECBlockGroupStats( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getDatanodeReport(.hadoop.hdfs.GetDatanodeReportRequestProto) returns (.hadoop.hdfs.GetDatanodeReportResponseProto); */ public abstract void getDatanodeReport( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getDatanodeStorageReport(.hadoop.hdfs.GetDatanodeStorageReportRequestProto) returns (.hadoop.hdfs.GetDatanodeStorageReportResponseProto); */ public abstract void getDatanodeStorageReport( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getPreferredBlockSize(.hadoop.hdfs.GetPreferredBlockSizeRequestProto) returns (.hadoop.hdfs.GetPreferredBlockSizeResponseProto); */ public abstract void getPreferredBlockSize( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc setSafeMode(.hadoop.hdfs.SetSafeModeRequestProto) returns (.hadoop.hdfs.SetSafeModeResponseProto); */ public abstract void setSafeMode( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc saveNamespace(.hadoop.hdfs.SaveNamespaceRequestProto) returns (.hadoop.hdfs.SaveNamespaceResponseProto); */ public abstract void saveNamespace( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc rollEdits(.hadoop.hdfs.RollEditsRequestProto) returns (.hadoop.hdfs.RollEditsResponseProto); */ public abstract void rollEdits( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto request, 
io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc restoreFailedStorage(.hadoop.hdfs.RestoreFailedStorageRequestProto) returns (.hadoop.hdfs.RestoreFailedStorageResponseProto); */ public abstract void restoreFailedStorage( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc refreshNodes(.hadoop.hdfs.RefreshNodesRequestProto) returns (.hadoop.hdfs.RefreshNodesResponseProto); */ public abstract void refreshNodes( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc finalizeUpgrade(.hadoop.hdfs.FinalizeUpgradeRequestProto) returns (.hadoop.hdfs.FinalizeUpgradeResponseProto); */ public abstract void finalizeUpgrade( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc upgradeStatus(.hadoop.hdfs.UpgradeStatusRequestProto) returns (.hadoop.hdfs.UpgradeStatusResponseProto); */ public abstract void upgradeStatus( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc rollingUpgrade(.hadoop.hdfs.RollingUpgradeRequestProto) returns (.hadoop.hdfs.RollingUpgradeResponseProto); */ public abstract void rollingUpgrade( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc listCorruptFileBlocks(.hadoop.hdfs.ListCorruptFileBlocksRequestProto) returns (.hadoop.hdfs.ListCorruptFileBlocksResponseProto); */ public abstract void listCorruptFileBlocks( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc metaSave(.hadoop.hdfs.MetaSaveRequestProto) returns (.hadoop.hdfs.MetaSaveResponseProto); */ public abstract void metaSave( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getFileInfo(.hadoop.hdfs.GetFileInfoRequestProto) returns (.hadoop.hdfs.GetFileInfoResponseProto); */ public abstract void getFileInfo( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getLocatedFileInfo(.hadoop.hdfs.GetLocatedFileInfoRequestProto) returns (.hadoop.hdfs.GetLocatedFileInfoResponseProto); */ public abstract void getLocatedFileInfo( 
io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc addCacheDirective(.hadoop.hdfs.AddCacheDirectiveRequestProto) returns (.hadoop.hdfs.AddCacheDirectiveResponseProto); */ public abstract void addCacheDirective( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc modifyCacheDirective(.hadoop.hdfs.ModifyCacheDirectiveRequestProto) returns (.hadoop.hdfs.ModifyCacheDirectiveResponseProto); */ public abstract void modifyCacheDirective( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc removeCacheDirective(.hadoop.hdfs.RemoveCacheDirectiveRequestProto) returns (.hadoop.hdfs.RemoveCacheDirectiveResponseProto); */ public abstract void removeCacheDirective( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc listCacheDirectives(.hadoop.hdfs.ListCacheDirectivesRequestProto) returns (.hadoop.hdfs.ListCacheDirectivesResponseProto); */ public abstract void listCacheDirectives( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc addCachePool(.hadoop.hdfs.AddCachePoolRequestProto) returns (.hadoop.hdfs.AddCachePoolResponseProto); */ public abstract void addCachePool( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc modifyCachePool(.hadoop.hdfs.ModifyCachePoolRequestProto) returns (.hadoop.hdfs.ModifyCachePoolResponseProto); */ public abstract void modifyCachePool( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc removeCachePool(.hadoop.hdfs.RemoveCachePoolRequestProto) returns (.hadoop.hdfs.RemoveCachePoolResponseProto); */ public abstract void removeCachePool( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc listCachePools(.hadoop.hdfs.ListCachePoolsRequestProto) returns (.hadoop.hdfs.ListCachePoolsResponseProto); */ public abstract void listCachePools( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto request, 
io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getFileLinkInfo(.hadoop.hdfs.GetFileLinkInfoRequestProto) returns (.hadoop.hdfs.GetFileLinkInfoResponseProto); */ public abstract void getFileLinkInfo( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getContentSummary(.hadoop.hdfs.GetContentSummaryRequestProto) returns (.hadoop.hdfs.GetContentSummaryResponseProto); */ public abstract void getContentSummary( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc setQuota(.hadoop.hdfs.SetQuotaRequestProto) returns (.hadoop.hdfs.SetQuotaResponseProto); */ public abstract void setQuota( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc fsync(.hadoop.hdfs.FsyncRequestProto) returns (.hadoop.hdfs.FsyncResponseProto); */ public abstract void fsync( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc setTimes(.hadoop.hdfs.SetTimesRequestProto) returns (.hadoop.hdfs.SetTimesResponseProto); */ public abstract void setTimes( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc createSymlink(.hadoop.hdfs.CreateSymlinkRequestProto) returns (.hadoop.hdfs.CreateSymlinkResponseProto); */ public abstract void createSymlink( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getLinkTarget(.hadoop.hdfs.GetLinkTargetRequestProto) returns (.hadoop.hdfs.GetLinkTargetResponseProto); */ public abstract void getLinkTarget( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc updateBlockForPipeline(.hadoop.hdfs.UpdateBlockForPipelineRequestProto) returns (.hadoop.hdfs.UpdateBlockForPipelineResponseProto); */ public abstract void updateBlockForPipeline( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc updatePipeline(.hadoop.hdfs.UpdatePipelineRequestProto) returns (.hadoop.hdfs.UpdatePipelineResponseProto); */ public abstract void updatePipeline( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, 
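// The delegation-token rpcs that follow (getDelegationToken,
// renewDelegationToken, cancelDelegationToken) reuse request/response
// messages from hadoop-common's SecurityProtos (the .hadoop.common proto
// package) rather than types generated in this file.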
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getDelegationToken(.hadoop.common.GetDelegationTokenRequestProto) returns (.hadoop.common.GetDelegationTokenResponseProto); */ public abstract void getDelegationToken( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc renewDelegationToken(.hadoop.common.RenewDelegationTokenRequestProto) returns (.hadoop.common.RenewDelegationTokenResponseProto); */ public abstract void renewDelegationToken( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc cancelDelegationToken(.hadoop.common.CancelDelegationTokenRequestProto) returns (.hadoop.common.CancelDelegationTokenResponseProto); */ public abstract void cancelDelegationToken( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc setBalancerBandwidth(.hadoop.hdfs.SetBalancerBandwidthRequestProto) returns (.hadoop.hdfs.SetBalancerBandwidthResponseProto); */ public abstract void setBalancerBandwidth( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getDataEncryptionKey(.hadoop.hdfs.GetDataEncryptionKeyRequestProto) returns (.hadoop.hdfs.GetDataEncryptionKeyResponseProto); */ public abstract void getDataEncryptionKey( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc createSnapshot(.hadoop.hdfs.CreateSnapshotRequestProto) returns (.hadoop.hdfs.CreateSnapshotResponseProto); */ public abstract void createSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc renameSnapshot(.hadoop.hdfs.RenameSnapshotRequestProto) returns (.hadoop.hdfs.RenameSnapshotResponseProto); */ public abstract void renameSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc allowSnapshot(.hadoop.hdfs.AllowSnapshotRequestProto) returns (.hadoop.hdfs.AllowSnapshotResponseProto); */ public abstract void allowSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc 
disallowSnapshot(.hadoop.hdfs.DisallowSnapshotRequestProto) returns (.hadoop.hdfs.DisallowSnapshotResponseProto); */ public abstract void disallowSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getSnapshottableDirListing(.hadoop.hdfs.GetSnapshottableDirListingRequestProto) returns (.hadoop.hdfs.GetSnapshottableDirListingResponseProto); */ public abstract void getSnapshottableDirListing( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc deleteSnapshot(.hadoop.hdfs.DeleteSnapshotRequestProto) returns (.hadoop.hdfs.DeleteSnapshotResponseProto); */ public abstract void deleteSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getSnapshotDiffReport(.hadoop.hdfs.GetSnapshotDiffReportRequestProto) returns (.hadoop.hdfs.GetSnapshotDiffReportResponseProto); */ public abstract void getSnapshotDiffReport( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getSnapshotDiffReportListing(.hadoop.hdfs.GetSnapshotDiffReportListingRequestProto) returns (.hadoop.hdfs.GetSnapshotDiffReportListingResponseProto); */ public abstract void getSnapshotDiffReportListing( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc isFileClosed(.hadoop.hdfs.IsFileClosedRequestProto) returns (.hadoop.hdfs.IsFileClosedResponseProto); */ public abstract void isFileClosed( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc modifyAclEntries(.hadoop.hdfs.ModifyAclEntriesRequestProto) returns (.hadoop.hdfs.ModifyAclEntriesResponseProto); */ public abstract void modifyAclEntries( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc removeAclEntries(.hadoop.hdfs.RemoveAclEntriesRequestProto) returns (.hadoop.hdfs.RemoveAclEntriesResponseProto); */ public abstract void removeAclEntries( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc removeDefaultAcl(.hadoop.hdfs.RemoveDefaultAclRequestProto) returns (.hadoop.hdfs.RemoveDefaultAclResponseProto); */ public abstract void removeDefaultAcl( 
io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc removeAcl(.hadoop.hdfs.RemoveAclRequestProto) returns (.hadoop.hdfs.RemoveAclResponseProto); */ public abstract void removeAcl( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc setAcl(.hadoop.hdfs.SetAclRequestProto) returns (.hadoop.hdfs.SetAclResponseProto); */ public abstract void setAcl( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getAclStatus(.hadoop.hdfs.GetAclStatusRequestProto) returns (.hadoop.hdfs.GetAclStatusResponseProto); */ public abstract void getAclStatus( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc setXAttr(.hadoop.hdfs.SetXAttrRequestProto) returns (.hadoop.hdfs.SetXAttrResponseProto); */ public abstract void setXAttr( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getXAttrs(.hadoop.hdfs.GetXAttrsRequestProto) returns (.hadoop.hdfs.GetXAttrsResponseProto); */ public abstract void getXAttrs( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc listXAttrs(.hadoop.hdfs.ListXAttrsRequestProto) returns (.hadoop.hdfs.ListXAttrsResponseProto); */ public abstract void listXAttrs( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc removeXAttr(.hadoop.hdfs.RemoveXAttrRequestProto) returns (.hadoop.hdfs.RemoveXAttrResponseProto); */ public abstract void removeXAttr( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc checkAccess(.hadoop.hdfs.CheckAccessRequestProto) returns (.hadoop.hdfs.CheckAccessResponseProto); */ public abstract void checkAccess( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc createEncryptionZone(.hadoop.hdfs.CreateEncryptionZoneRequestProto) returns (.hadoop.hdfs.CreateEncryptionZoneResponseProto); */ public abstract void createEncryptionZone( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto request, 
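// From createEncryptionZone onward, encryption-zone rpcs take their message
// types from EncryptionZonesProtos and erasure-coding rpcs take theirs from
// ErasureCodingProtos; the remaining rpcs (getCurrentEditLogTxid,
// getEditsFromTxid, getQuotaUsage, listOpenFiles, satisfyStoragePolicy)
// keep using messages from this file.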
io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc listEncryptionZones(.hadoop.hdfs.ListEncryptionZonesRequestProto) returns (.hadoop.hdfs.ListEncryptionZonesResponseProto); */ public abstract void listEncryptionZones( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc reencryptEncryptionZone(.hadoop.hdfs.ReencryptEncryptionZoneRequestProto) returns (.hadoop.hdfs.ReencryptEncryptionZoneResponseProto); */ public abstract void reencryptEncryptionZone( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptEncryptionZoneRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc listReencryptionStatus(.hadoop.hdfs.ListReencryptionStatusRequestProto) returns (.hadoop.hdfs.ListReencryptionStatusResponseProto); */ public abstract void listReencryptionStatus( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getEZForPath(.hadoop.hdfs.GetEZForPathRequestProto) returns (.hadoop.hdfs.GetEZForPathResponseProto); */ public abstract void getEZForPath( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc setErasureCodingPolicy(.hadoop.hdfs.SetErasureCodingPolicyRequestProto) returns (.hadoop.hdfs.SetErasureCodingPolicyResponseProto); */ public abstract void setErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc unsetErasureCodingPolicy(.hadoop.hdfs.UnsetErasureCodingPolicyRequestProto) returns (.hadoop.hdfs.UnsetErasureCodingPolicyResponseProto); */ public abstract void unsetErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getCurrentEditLogTxid(.hadoop.hdfs.GetCurrentEditLogTxidRequestProto) returns (.hadoop.hdfs.GetCurrentEditLogTxidResponseProto); */ public abstract void getCurrentEditLogTxid( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getEditsFromTxid(.hadoop.hdfs.GetEditsFromTxidRequestProto) returns (.hadoop.hdfs.GetEditsFromTxidResponseProto); */ public abstract void getEditsFromTxid( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc 
getErasureCodingPolicies(.hadoop.hdfs.GetErasureCodingPoliciesRequestProto) returns (.hadoop.hdfs.GetErasureCodingPoliciesResponseProto); */ public abstract void getErasureCodingPolicies( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc addErasureCodingPolicies(.hadoop.hdfs.AddErasureCodingPoliciesRequestProto) returns (.hadoop.hdfs.AddErasureCodingPoliciesResponseProto); */ public abstract void addErasureCodingPolicies( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc removeErasureCodingPolicy(.hadoop.hdfs.RemoveErasureCodingPolicyRequestProto) returns (.hadoop.hdfs.RemoveErasureCodingPolicyResponseProto); */ public abstract void removeErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc enableErasureCodingPolicy(.hadoop.hdfs.EnableErasureCodingPolicyRequestProto) returns (.hadoop.hdfs.EnableErasureCodingPolicyResponseProto); */ public abstract void enableErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.EnableErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc disableErasureCodingPolicy(.hadoop.hdfs.DisableErasureCodingPolicyRequestProto) returns (.hadoop.hdfs.DisableErasureCodingPolicyResponseProto); */ public abstract void disableErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.DisableErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getErasureCodingPolicy(.hadoop.hdfs.GetErasureCodingPolicyRequestProto) returns (.hadoop.hdfs.GetErasureCodingPolicyResponseProto); */ public abstract void getErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getErasureCodingCodecs(.hadoop.hdfs.GetErasureCodingCodecsRequestProto) returns (.hadoop.hdfs.GetErasureCodingCodecsResponseProto); */ public abstract void getErasureCodingCodecs( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc getQuotaUsage(.hadoop.hdfs.GetQuotaUsageRequestProto) returns (.hadoop.hdfs.GetQuotaUsageResponseProto); */ public abstract void getQuotaUsage( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc 
listOpenFiles(.hadoop.hdfs.ListOpenFilesRequestProto) returns (.hadoop.hdfs.ListOpenFilesResponseProto); */ public abstract void listOpenFiles( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); /** * rpc satisfyStoragePolicy(.hadoop.hdfs.SatisfyStoragePolicyRequestProto) returns (.hadoop.hdfs.SatisfyStoragePolicyResponseProto); */ public abstract void satisfyStoragePolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done); public static final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.getDescriptor().getServices().get(0); } public final io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.ServiceDescriptor getDescriptorForType() { return getDescriptor(); } public final void callMethod( io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.MethodDescriptor method, io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, io.prestosql.hadoop.$internal.com.google.protobuf.Message request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback< io.prestosql.hadoop.$internal.com.google.protobuf.Message> done) { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.callMethod() given method descriptor for wrong " + "service type."); } switch(method.getIndex()) { case 0: this.getBlockLocations(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 1: this.getServerDefaults(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 2: this.create(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 3: this.append(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 4: this.setReplication(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 5: this.setStoragePolicy(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 6: this.unsetStoragePolicy(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 7: this.getStoragePolicy(controller, 
(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 8: this.getStoragePolicies(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 9: this.setPermission(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 10: this.setOwner(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 11: this.abandonBlock(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 12: this.addBlock(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 13: this.getAdditionalDatanode(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 14: this.complete(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 15: this.reportBadBlocks(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 16: this.concat(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 17: this.truncate(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 18: this.rename(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 19: this.rename2(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 20: this.delete(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 21: this.mkdirs(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 22: this.getListing(controller, 
(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 23: this.renewLease(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 24: this.recoverLease(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 25: this.getFsStats(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 26: this.getFsReplicatedBlockStats(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 27: this.getFsECBlockGroupStats(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 28: this.getDatanodeReport(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 29: this.getDatanodeStorageReport(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 30: this.getPreferredBlockSize(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 31: this.setSafeMode(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 32: this.saveNamespace(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 33: this.rollEdits(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 34: this.restoreFailedStorage(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 35: this.refreshNodes(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 36: this.finalizeUpgrade(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto)request, 
io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 37: this.upgradeStatus(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 38: this.rollingUpgrade(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 39: this.listCorruptFileBlocks(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 40: this.metaSave(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 41: this.getFileInfo(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 42: this.getLocatedFileInfo(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 43: this.addCacheDirective(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 44: this.modifyCacheDirective(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 45: this.removeCacheDirective(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 46: this.listCacheDirectives(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 47: this.addCachePool(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 48: this.modifyCachePool(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 49: this.removeCachePool(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 50: this.listCachePools(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 51: this.getFileLinkInfo(controller, 
(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 52: this.getContentSummary(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 53: this.setQuota(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 54: this.fsync(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 55: this.setTimes(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 56: this.createSymlink(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 57: this.getLinkTarget(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 58: this.updateBlockForPipeline(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 59: this.updatePipeline(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 60: this.getDelegationToken(controller, (org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 61: this.renewDelegationToken(controller, (org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 62: this.cancelDelegationToken(controller, (org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 63: this.setBalancerBandwidth(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 64: this.getDataEncryptionKey(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 65: this.createSnapshot(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 66: 
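// callMethod() dispatches on MethodDescriptor.getIndex(), i.e. the position of
// the rpc inside the service block of ClientNamenodeProtocol.proto, so the
// case labels mirror declaration order (case 66 below is renameSnapshot).
// A caller-side sketch; `service`, `controller`, `request` and `done` are
// assumed to come from the hosting RPC framework:
//
//   Descriptors.MethodDescriptor md = ClientNamenodeProtocol.getDescriptor()
//       .findMethodByName("renameSnapshot");
//   service.callMethod(md, controller, request, done);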
this.renameSnapshot(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 67: this.allowSnapshot(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 68: this.disallowSnapshot(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 69: this.getSnapshottableDirListing(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 70: this.deleteSnapshot(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 71: this.getSnapshotDiffReport(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 72: this.getSnapshotDiffReportListing(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 73: this.isFileClosed(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 74: this.modifyAclEntries(controller, (org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 75: this.removeAclEntries(controller, (org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 76: this.removeDefaultAcl(controller, (org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 77: this.removeAcl(controller, (org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 78: this.setAcl(controller, (org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 79: this.getAclStatus(controller, (org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 80: this.setXAttr(controller, (org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 81: this.getXAttrs(controller, 
(org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 82: this.listXAttrs(controller, (org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 83: this.removeXAttr(controller, (org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 84: this.checkAccess(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 85: this.createEncryptionZone(controller, (org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 86: this.listEncryptionZones(controller, (org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 87: this.reencryptEncryptionZone(controller, (org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptEncryptionZoneRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 88: this.listReencryptionStatus(controller, (org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 89: this.getEZForPath(controller, (org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 90: this.setErasureCodingPolicy(controller, (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 91: this.unsetErasureCodingPolicy(controller, (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 92: this.getCurrentEditLogTxid(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 93: this.getEditsFromTxid(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 94: this.getErasureCodingPolicies(controller, (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 95: this.addErasureCodingPolicies(controller, (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 96: 
this.removeErasureCodingPolicy(controller, (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 97: this.enableErasureCodingPolicy(controller, (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.EnableErasureCodingPolicyRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 98: this.disableErasureCodingPolicy(controller, (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.DisableErasureCodingPolicyRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 99: this.getErasureCodingPolicy(controller, (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 100: this.getErasureCodingCodecs(controller, (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 101: this.getQuotaUsage(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 102: this.listOpenFiles(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 103: this.satisfyStoragePolicy(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto)request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.specializeCallback( done)); return; default: throw new java.lang.AssertionError("Can't get here."); } } public final io.prestosql.hadoop.$internal.com.google.protobuf.Message getRequestPrototype( io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.MethodDescriptor method) { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.getRequestPrototype() given method " + "descriptor for wrong service type."); } switch(method.getIndex()) { case 0: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto.getDefaultInstance(); case 1: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto.getDefaultInstance(); case 2: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto.getDefaultInstance(); case 3: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto.getDefaultInstance(); case 4: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto.getDefaultInstance(); case 5: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto.getDefaultInstance(); case 6: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto.getDefaultInstance(); case 7: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto.getDefaultInstance(); case 8: return 
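// getRequestPrototype() hands a generic server a default instance of the right
// request type for any method, so incoming bytes can be parsed without
// compile-time knowledge of that type. The usual pattern, sketched with a
// hypothetical byte[] `rawBytes` holding the serialized request:
//
//   Message prototype = service.getRequestPrototype(md);
//   Message request =
//       prototype.newBuilderForType().mergeFrom(rawBytes).build();
//   // mergeFrom throws InvalidProtocolBufferException on malformed input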
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto.getDefaultInstance(); case 9: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto.getDefaultInstance(); case 10: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto.getDefaultInstance(); case 11: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto.getDefaultInstance(); case 12: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto.getDefaultInstance(); case 13: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto.getDefaultInstance(); case 14: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto.getDefaultInstance(); case 15: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance(); case 16: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto.getDefaultInstance(); case 17: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto.getDefaultInstance(); case 18: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto.getDefaultInstance(); case 19: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto.getDefaultInstance(); case 20: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto.getDefaultInstance(); case 21: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto.getDefaultInstance(); case 22: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto.getDefaultInstance(); case 23: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto.getDefaultInstance(); case 24: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto.getDefaultInstance(); case 25: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto.getDefaultInstance(); case 26: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto.getDefaultInstance(); case 27: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto.getDefaultInstance(); case 28: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto.getDefaultInstance(); case 29: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto.getDefaultInstance(); case 30: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto.getDefaultInstance(); case 31: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto.getDefaultInstance(); case 32: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto.getDefaultInstance(); case 33: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto.getDefaultInstance(); case 34: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto.getDefaultInstance(); case 35: return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto.getDefaultInstance(); case 36: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto.getDefaultInstance(); case 37: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto.getDefaultInstance(); case 38: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto.getDefaultInstance(); case 39: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto.getDefaultInstance(); case 40: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto.getDefaultInstance(); case 41: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto.getDefaultInstance(); case 42: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto.getDefaultInstance(); case 43: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto.getDefaultInstance(); case 44: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto.getDefaultInstance(); case 45: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto.getDefaultInstance(); case 46: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto.getDefaultInstance(); case 47: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto.getDefaultInstance(); case 48: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto.getDefaultInstance(); case 49: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto.getDefaultInstance(); case 50: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto.getDefaultInstance(); case 51: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto.getDefaultInstance(); case 52: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto.getDefaultInstance(); case 53: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto.getDefaultInstance(); case 54: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto.getDefaultInstance(); case 55: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto.getDefaultInstance(); case 56: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto.getDefaultInstance(); case 57: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto.getDefaultInstance(); case 58: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto.getDefaultInstance(); case 59: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto.getDefaultInstance(); case 60: return org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto.getDefaultInstance(); case 61: return org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto.getDefaultInstance(); case 62: return 
org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto.getDefaultInstance(); case 63: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto.getDefaultInstance(); case 64: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto.getDefaultInstance(); case 65: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto.getDefaultInstance(); case 66: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto.getDefaultInstance(); case 67: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto.getDefaultInstance(); case 68: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto.getDefaultInstance(); case 69: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto.getDefaultInstance(); case 70: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto.getDefaultInstance(); case 71: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto.getDefaultInstance(); case 72: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto.getDefaultInstance(); case 73: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto.getDefaultInstance(); case 74: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto.getDefaultInstance(); case 75: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesRequestProto.getDefaultInstance(); case 76: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclRequestProto.getDefaultInstance(); case 77: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclRequestProto.getDefaultInstance(); case 78: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclRequestProto.getDefaultInstance(); case 79: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto.getDefaultInstance(); case 80: return org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto.getDefaultInstance(); case 81: return org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto.getDefaultInstance(); case 82: return org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto.getDefaultInstance(); case 83: return org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto.getDefaultInstance(); case 84: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto.getDefaultInstance(); case 85: return org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto.getDefaultInstance(); case 86: return org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto.getDefaultInstance(); case 87: return org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptEncryptionZoneRequestProto.getDefaultInstance(); case 88: return org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusRequestProto.getDefaultInstance(); case 89: return org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto.getDefaultInstance(); case 90: return 
org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto.getDefaultInstance(); case 91: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyRequestProto.getDefaultInstance(); case 92: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto.getDefaultInstance(); case 93: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto.getDefaultInstance(); case 94: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto.getDefaultInstance(); case 95: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesRequestProto.getDefaultInstance(); case 96: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyRequestProto.getDefaultInstance(); case 97: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.EnableErasureCodingPolicyRequestProto.getDefaultInstance(); case 98: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.DisableErasureCodingPolicyRequestProto.getDefaultInstance(); case 99: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyRequestProto.getDefaultInstance(); case 100: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsRequestProto.getDefaultInstance(); case 101: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto.getDefaultInstance(); case 102: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto.getDefaultInstance(); case 103: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } } public final io.prestosql.hadoop.$internal.com.google.protobuf.Message getResponsePrototype( io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.MethodDescriptor method) { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.getResponsePrototype() given method " + "descriptor for wrong service type."); } switch(method.getIndex()) { case 0: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.getDefaultInstance(); case 1: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.getDefaultInstance(); case 2: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.getDefaultInstance(); case 3: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.getDefaultInstance(); case 4: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.getDefaultInstance(); case 5: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto.getDefaultInstance(); case 6: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto.getDefaultInstance(); case 7: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto.getDefaultInstance(); case 8: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto.getDefaultInstance(); case 9: return 
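// getResponsePrototype() is the mirror image for responses: the Stub further
// down passes the prototype to RpcChannel.callMethod so the channel knows
// which message type to parse off the wire. Sketch, with a hypothetical
// byte[] `replyBytes`:
//
//   Message reply = service.getResponsePrototype(md)
//       .newBuilderForType().mergeFrom(replyBytes).build();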
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.getDefaultInstance(); case 10: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.getDefaultInstance(); case 11: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto.getDefaultInstance(); case 12: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto.getDefaultInstance(); case 13: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto.getDefaultInstance(); case 14: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto.getDefaultInstance(); case 15: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance(); case 16: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.getDefaultInstance(); case 17: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto.getDefaultInstance(); case 18: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.getDefaultInstance(); case 19: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.getDefaultInstance(); case 20: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.getDefaultInstance(); case 21: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.getDefaultInstance(); case 22: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.getDefaultInstance(); case 23: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.getDefaultInstance(); case 24: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.getDefaultInstance(); case 25: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.getDefaultInstance(); case 26: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto.getDefaultInstance(); case 27: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto.getDefaultInstance(); case 28: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.getDefaultInstance(); case 29: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto.getDefaultInstance(); case 30: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.getDefaultInstance(); case 31: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.getDefaultInstance(); case 32: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.getDefaultInstance(); case 33: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto.getDefaultInstance(); case 34: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.getDefaultInstance(); case 35: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.getDefaultInstance(); case 36: return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.getDefaultInstance(); case 37: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto.getDefaultInstance(); case 38: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto.getDefaultInstance(); case 39: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.getDefaultInstance(); case 40: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.getDefaultInstance(); case 41: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.getDefaultInstance(); case 42: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto.getDefaultInstance(); case 43: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto.getDefaultInstance(); case 44: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto.getDefaultInstance(); case 45: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto.getDefaultInstance(); case 46: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto.getDefaultInstance(); case 47: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto.getDefaultInstance(); case 48: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto.getDefaultInstance(); case 49: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto.getDefaultInstance(); case 50: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto.getDefaultInstance(); case 51: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.getDefaultInstance(); case 52: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.getDefaultInstance(); case 53: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.getDefaultInstance(); case 54: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.getDefaultInstance(); case 55: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.getDefaultInstance(); case 56: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.getDefaultInstance(); case 57: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.getDefaultInstance(); case 58: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto.getDefaultInstance(); case 59: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.getDefaultInstance(); case 60: return org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto.getDefaultInstance(); case 61: return org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto.getDefaultInstance(); case 62: return org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto.getDefaultInstance(); case 63: 
return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.getDefaultInstance(); case 64: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto.getDefaultInstance(); case 65: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto.getDefaultInstance(); case 66: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto.getDefaultInstance(); case 67: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto.getDefaultInstance(); case 68: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto.getDefaultInstance(); case 69: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto.getDefaultInstance(); case 70: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto.getDefaultInstance(); case 71: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto.getDefaultInstance(); case 72: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto.getDefaultInstance(); case 73: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto.getDefaultInstance(); case 74: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesResponseProto.getDefaultInstance(); case 75: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesResponseProto.getDefaultInstance(); case 76: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclResponseProto.getDefaultInstance(); case 77: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclResponseProto.getDefaultInstance(); case 78: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclResponseProto.getDefaultInstance(); case 79: return org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto.getDefaultInstance(); case 80: return org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrResponseProto.getDefaultInstance(); case 81: return org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto.getDefaultInstance(); case 82: return org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsResponseProto.getDefaultInstance(); case 83: return org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrResponseProto.getDefaultInstance(); case 84: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto.getDefaultInstance(); case 85: return org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneResponseProto.getDefaultInstance(); case 86: return org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto.getDefaultInstance(); case 87: return org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptEncryptionZoneResponseProto.getDefaultInstance(); case 88: return org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusResponseProto.getDefaultInstance(); case 89: return org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathResponseProto.getDefaultInstance(); case 90: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyResponseProto.getDefaultInstance(); case 91: return 
org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyResponseProto.getDefaultInstance(); case 92: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto.getDefaultInstance(); case 93: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto.getDefaultInstance(); case 94: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto.getDefaultInstance(); case 95: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesResponseProto.getDefaultInstance(); case 96: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyResponseProto.getDefaultInstance(); case 97: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.EnableErasureCodingPolicyResponseProto.getDefaultInstance(); case 98: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.DisableErasureCodingPolicyResponseProto.getDefaultInstance(); case 99: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyResponseProto.getDefaultInstance(); case 100: return org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsResponseProto.getDefaultInstance(); case 101: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto.getDefaultInstance(); case 102: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto.getDefaultInstance(); case 103: return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } } public static Stub newStub( io.prestosql.hadoop.$internal.com.google.protobuf.RpcChannel channel) { return new Stub(channel); } public static final class Stub extends org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol implements Interface { private Stub(io.prestosql.hadoop.$internal.com.google.protobuf.RpcChannel channel) { this.channel = channel; } private final io.prestosql.hadoop.$internal.com.google.protobuf.RpcChannel channel; public io.prestosql.hadoop.$internal.com.google.protobuf.RpcChannel getChannel() { return channel; } public void getBlockLocations( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(0), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.getDefaultInstance())); } public void getServerDefaults( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(1), controller, request, 
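// Stub is the client half of the service: each method forwards to the wrapped
// RpcChannel with the MethodDescriptor, the expected response prototype, and a
// callback adapted by RpcUtil.generalizeCallback. A client-side usage sketch,
// assuming an RpcChannel implementation named `channel` is available:
//
//   ClientNamenodeProtocol.Stub stub = ClientNamenodeProtocol.newStub(channel);
//   stub.getServerDefaults(controller,
//       GetServerDefaultsRequestProto.getDefaultInstance(),
//       response -> System.out.println(response.getServerDefaults()));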
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.getDefaultInstance())); } public void create( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(2), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.getDefaultInstance())); } public void append( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(3), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.getDefaultInstance())); } public void setReplication( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(4), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.getDefaultInstance())); } public void setStoragePolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(5), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto.getDefaultInstance())); } public void unsetStoragePolicy( 
io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(6), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto.getDefaultInstance())); } public void getStoragePolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(7), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto.getDefaultInstance())); } public void getStoragePolicies( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(8), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto.getDefaultInstance())); } public void setPermission( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(9), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.getDefaultInstance())); } public void setOwner( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(10), controller, request, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.getDefaultInstance())); } public void abandonBlock( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(11), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto.getDefaultInstance())); } public void addBlock( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(12), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto.getDefaultInstance())); } public void getAdditionalDatanode( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(13), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto.getDefaultInstance())); } public void complete( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(14), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto.getDefaultInstance())); } public void reportBadBlocks( 
io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(15), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance())); } public void concat( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(16), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.getDefaultInstance())); } public void truncate( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(17), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto.getDefaultInstance())); } public void rename( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(18), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.getDefaultInstance())); } public void rename2( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(19), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, 
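// RpcUtil.generalizeCallback(done, responseClass, defaultInstance) widens the
// typed callback so the untyped channel can invoke it; the class literal and
// default instance that follow let the adapter check, and if necessary copy,
// the incoming reply into the expected response type.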
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.getDefaultInstance())); } public void delete( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(20), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.getDefaultInstance())); } public void mkdirs( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(21), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.getDefaultInstance())); } public void getListing( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(22), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.getDefaultInstance())); } public void renewLease( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(23), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.getDefaultInstance())); } public void recoverLease( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(24), controller, 
request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.getDefaultInstance())); } public void getFsStats( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(25), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.getDefaultInstance())); } public void getFsReplicatedBlockStats( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(26), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto.getDefaultInstance())); } public void getFsECBlockGroupStats( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(27), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto.getDefaultInstance())); } public void getDatanodeReport( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(28), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.class, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.getDefaultInstance())); } public void getDatanodeStorageReport( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(29), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto.getDefaultInstance())); } public void getPreferredBlockSize( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(30), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.getDefaultInstance())); } public void setSafeMode( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(31), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.getDefaultInstance())); } public void saveNamespace( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(32), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.getDefaultInstance())); } public void rollEdits( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto request, 
io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(33), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto.getDefaultInstance())); } public void restoreFailedStorage( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(34), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.getDefaultInstance())); } public void refreshNodes( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(35), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.getDefaultInstance())); } public void finalizeUpgrade( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(36), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.getDefaultInstance())); } public void upgradeStatus( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(37), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto.getDefaultInstance())); } public void rollingUpgrade( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(38), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto.getDefaultInstance())); } public void listCorruptFileBlocks( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(39), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.getDefaultInstance())); } public void metaSave( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(40), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.getDefaultInstance())); } public void getFileInfo( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(41), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.getDefaultInstance())); } public void getLocatedFileInfo( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto request, 
io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(42), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto.getDefaultInstance())); } public void addCacheDirective( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(43), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto.getDefaultInstance())); } public void modifyCacheDirective( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(44), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto.getDefaultInstance())); } public void removeCacheDirective( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(45), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto.getDefaultInstance())); } public void listCacheDirectives( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(46), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto.getDefaultInstance(), 
io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto.getDefaultInstance())); } public void addCachePool( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(47), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto.getDefaultInstance())); } public void modifyCachePool( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(48), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto.getDefaultInstance())); } public void removeCachePool( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(49), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto.getDefaultInstance())); } public void listCachePools( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(50), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto.getDefaultInstance())); } public void getFileLinkInfo( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(51), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.getDefaultInstance())); } public void getContentSummary( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(52), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.getDefaultInstance())); } public void setQuota( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(53), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.getDefaultInstance())); } public void fsync( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(54), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.getDefaultInstance())); } public void setTimes( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(55), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.getDefaultInstance())); } public void createSymlink( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(56), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.getDefaultInstance())); } public void getLinkTarget( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(57), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.getDefaultInstance())); } public void updateBlockForPipeline( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(58), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto.getDefaultInstance())); } public void updatePipeline( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(59), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.getDefaultInstance())); } public void getDelegationToken( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto 
request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(60), controller, request, org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto.class, org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto.getDefaultInstance())); } public void renewDelegationToken( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(61), controller, request, org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto.class, org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto.getDefaultInstance())); } public void cancelDelegationToken( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(62), controller, request, org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto.class, org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto.getDefaultInstance())); } public void setBalancerBandwidth( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(63), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.getDefaultInstance())); } public void getDataEncryptionKey( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(64), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto.class, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto.getDefaultInstance())); } public void createSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(65), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto.getDefaultInstance())); } public void renameSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(66), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto.getDefaultInstance())); } public void allowSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(67), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto.getDefaultInstance())); } public void disallowSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(68), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto.getDefaultInstance())); } public void getSnapshottableDirListing( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { 
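// A minimal usage sketch (editor's illustration, not part of the generated
// API): given some RpcChannel implementation -- hypothetical here -- the async
// stub is obtained via newStub(...), and each call completes through its
// RpcCallback (a single-method interface, so a lambda works on Java 8+):
//
//   io.prestosql.hadoop.$internal.com.google.protobuf.RpcChannel ch = ...;
//   ClientNamenodeProtocol.Stub stub = ClientNamenodeProtocol.newStub(ch);
//   stub.getSnapshottableDirListing(controller, request,
//       response -> System.out.println(response));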
channel.callMethod( getDescriptor().getMethods().get(69), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto.getDefaultInstance())); } public void deleteSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(70), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto.getDefaultInstance())); } public void getSnapshotDiffReport( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(71), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto.getDefaultInstance())); } public void getSnapshotDiffReportListing( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(72), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto.getDefaultInstance())); } public void isFileClosed( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(73), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto.getDefaultInstance())); } public void modifyAclEntries( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(74), controller, request, org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesResponseProto.getDefaultInstance())); } public void removeAclEntries( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(75), controller, request, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesResponseProto.getDefaultInstance())); } public void removeDefaultAcl( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(76), controller, request, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclResponseProto.getDefaultInstance())); } public void removeAcl( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(77), controller, request, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclResponseProto.getDefaultInstance())); } public void setAcl( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(78), controller, request, org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclResponseProto.getDefaultInstance())); } public void getAclStatus( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(79), controller, request, org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto.getDefaultInstance())); } public void setXAttr( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(80), controller, request, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrResponseProto.getDefaultInstance())); } public void getXAttrs( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(81), controller, request, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto.getDefaultInstance())); } public void listXAttrs( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(82), controller, request, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsResponseProto.getDefaultInstance())); } public void removeXAttr( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(83), controller, request, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrResponseProto.class, 
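// ---------------------------------------------------------------------------
// Usage sketch (illustrative comment only, not part of the generated file):
// each method above is the callback-based form of one ClientNamenodeProtocol
// RPC. The caller supplies an RpcCallback that receives the response message
// once the call completes (conventionally null, with a failed controller, on
// error). The 'stub', 'controller', and 'getXAttrsRequest' names below are
// assumptions about the caller's setup, not something this class provides.
//
//   stub.getXAttrs(controller, getXAttrsRequest,
//       new io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback<
//           org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto>() {
//         public void run(
//             org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto response) {
//           if (response == null) { /* inspect controller.failed()/errorText() */ }
//         }
//       });
// ---------------------------------------------------------------------------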
org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrResponseProto.getDefaultInstance())); } public void checkAccess( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(84), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto.getDefaultInstance())); } public void createEncryptionZone( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(85), controller, request, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneResponseProto.getDefaultInstance())); } public void listEncryptionZones( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(86), controller, request, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto.getDefaultInstance())); } public void reencryptEncryptionZone( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptEncryptionZoneRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(87), controller, request, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptEncryptionZoneResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptEncryptionZoneResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptEncryptionZoneResponseProto.getDefaultInstance())); } public void listReencryptionStatus( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( 
getDescriptor().getMethods().get(88), controller, request, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusResponseProto.getDefaultInstance())); } public void getEZForPath( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(89), controller, request, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathResponseProto.getDefaultInstance())); } public void setErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(90), controller, request, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyResponseProto.getDefaultInstance())); } public void unsetErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(91), controller, request, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyResponseProto.getDefaultInstance())); } public void getCurrentEditLogTxid( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(92), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto.class, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto.getDefaultInstance())); } public void getEditsFromTxid( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(93), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto.getDefaultInstance())); } public void getErasureCodingPolicies( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(94), controller, request, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto.getDefaultInstance())); } public void addErasureCodingPolicies( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(95), controller, request, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesResponseProto.getDefaultInstance())); } public void removeErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(96), controller, request, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyResponseProto.getDefaultInstance())); } public void enableErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.EnableErasureCodingPolicyRequestProto request, 
io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(97), controller, request, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.EnableErasureCodingPolicyResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.EnableErasureCodingPolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.EnableErasureCodingPolicyResponseProto.getDefaultInstance())); } public void disableErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.DisableErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(98), controller, request, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.DisableErasureCodingPolicyResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.DisableErasureCodingPolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.DisableErasureCodingPolicyResponseProto.getDefaultInstance())); } public void getErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(99), controller, request, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyResponseProto.getDefaultInstance())); } public void getErasureCodingCodecs( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(100), controller, request, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsResponseProto.getDefaultInstance())); } public void getQuotaUsage( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto request, io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(101), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto.getDefaultInstance(), io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback( done, 
                org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto.class,
                org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto.getDefaultInstance()));
      }

      public void listOpenFiles(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto request,
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto> done) {
        channel.callMethod(
            getDescriptor().getMethods().get(102),
            controller,
            request,
            org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto.getDefaultInstance(),
            io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback(
                done,
                org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto.class,
                org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto.getDefaultInstance()));
      }

      public void satisfyStoragePolicy(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto request,
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto> done) {
        channel.callMethod(
            getDescriptor().getMethods().get(103),
            controller,
            request,
            org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto.getDefaultInstance(),
            io.prestosql.hadoop.$internal.com.google.protobuf.RpcUtil.generalizeCallback(
                done,
                org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto.class,
                org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto.getDefaultInstance()));
      }
    }

    public static BlockingInterface newBlockingStub(
        io.prestosql.hadoop.$internal.com.google.protobuf.BlockingRpcChannel channel) {
      return new BlockingStub(channel);
    }

    public interface BlockingInterface {
      public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto getBlockLocations(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto getServerDefaults(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto create(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto append(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException;

      public
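// ---------------------------------------------------------------------------
// Usage sketch (illustrative comment only): newBlockingStub wraps a
// BlockingRpcChannel in the synchronous BlockingInterface declared above.
// Where the channel and controller come from is up to the caller's RPC
// engine; 'channel', 'controller', and the path are assumed here, and
// setSrc assumes the request proto's 'src' field.
//
//   BlockingInterface namenode = newBlockingStub(channel);
//   org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto info =
//       namenode.getFileInfo(controller,
//           org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto
//               .newBuilder().setSrc("/user/example").build());
// ---------------------------------------------------------------------------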
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto setReplication( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto setStoragePolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto unsetStoragePolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto getStoragePolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto getStoragePolicies( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto setPermission( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto setOwner( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto abandonBlock( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto addBlock( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto getAdditionalDatanode( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto complete( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto reportBadBlocks( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto concat( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto truncate( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto rename( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto rename2( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto delete( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto mkdirs( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto getListing( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto renewLease( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController 
controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto recoverLease( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto getFsStats( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto getFsReplicatedBlockStats( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto getFsECBlockGroupStats( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto getDatanodeReport( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto getDatanodeStorageReport( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto getPreferredBlockSize( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto setSafeMode( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto saveNamespace( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto request) throws 
io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto rollEdits( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto restoreFailedStorage( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto refreshNodes( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto finalizeUpgrade( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto upgradeStatus( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto rollingUpgrade( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto listCorruptFileBlocks( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto metaSave( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto getFileInfo( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto getLocatedFileInfo( 
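// ---------------------------------------------------------------------------
// Error-handling sketch (illustrative comment only): every blocking method
// declares the shaded com.google.protobuf.ServiceException. A common
// caller-side pattern, assumed here rather than prescribed by this file, is
// to unwrap the remote error from the exception's cause:
//
//   try {
//     namenode.mkdirs(controller, mkdirsRequest);
//   } catch (io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException e) {
//     Throwable remote = e.getCause(); // often the server-side IOException
//   }
// ---------------------------------------------------------------------------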
io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto addCacheDirective( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto modifyCacheDirective( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto removeCacheDirective( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto listCacheDirectives( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto addCachePool( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto modifyCachePool( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto removeCachePool( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto listCachePools( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto getFileLinkInfo( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto getContentSummary( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto setQuota( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto fsync( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto setTimes( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto createSymlink( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto getLinkTarget( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto updateBlockForPipeline( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto updatePipeline( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto getDelegationToken( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto 
renewDelegationToken( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto cancelDelegationToken( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto setBalancerBandwidth( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto getDataEncryptionKey( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto createSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto renameSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto allowSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto disallowSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto getSnapshottableDirListing( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto deleteSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto getSnapshotDiffReport( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto getSnapshotDiffReportListing( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto isFileClosed( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesResponseProto modifyAclEntries( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesResponseProto removeAclEntries( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclResponseProto removeDefaultAcl( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclResponseProto removeAcl( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclResponseProto setAcl( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto getAclStatus( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrResponseProto setXAttr( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, 
org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto getXAttrs( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsResponseProto listXAttrs( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrResponseProto removeXAttr( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto checkAccess( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneResponseProto createEncryptionZone( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto listEncryptionZones( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptEncryptionZoneResponseProto reencryptEncryptionZone( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptEncryptionZoneRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusResponseProto listReencryptionStatus( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathResponseProto getEZForPath( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyResponseProto setErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController 
controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyResponseProto unsetErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto getCurrentEditLogTxid( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto getEditsFromTxid( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto getErasureCodingPolicies( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesResponseProto addErasureCodingPolicies( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyResponseProto removeErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.EnableErasureCodingPolicyResponseProto enableErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.EnableErasureCodingPolicyRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.DisableErasureCodingPolicyResponseProto disableErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.DisableErasureCodingPolicyRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException; public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyResponseProto getErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, 
          org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsResponseProto getErasureCodingCodecs(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto getQuotaUsage(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto listOpenFiles(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto satisfyStoragePolicy(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException;
    }

    private static final class BlockingStub implements BlockingInterface {
      private BlockingStub(io.prestosql.hadoop.$internal.com.google.protobuf.BlockingRpcChannel channel) {
        this.channel = channel;
      }

      private final io.prestosql.hadoop.$internal.com.google.protobuf.BlockingRpcChannel channel;

      public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto getBlockLocations(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto) channel.callBlockingMethod(
            getDescriptor().getMethods().get(0),
            controller,
            request,
            org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto getServerDefaults(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto) channel.callBlockingMethod(
            getDescriptor().getMethods().get(1),
            controller,
            request,
            org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto create(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(2), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto append( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(3), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto setReplication( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(4), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto setStoragePolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(5), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto unsetStoragePolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(6), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto getStoragePolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto request) throws 
io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(7), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto getStoragePolicies( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(8), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto setPermission( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(9), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto setOwner( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(10), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto abandonBlock( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(11), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto addBlock( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto) 
channel.callBlockingMethod( getDescriptor().getMethods().get(12), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto getAdditionalDatanode( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(13), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto complete( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(14), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto reportBadBlocks( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(15), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto concat( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(16), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto truncate( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(17), controller, request, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto rename( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(18), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto rename2( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(19), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto delete( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(20), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto mkdirs( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(21), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto getListing( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(22), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto renewLease( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(23), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto recoverLease( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(24), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto getFsStats( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(25), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto getFsReplicatedBlockStats( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(26), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto getFsECBlockGroupStats( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(27), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto getDatanodeReport( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(28), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto getDatanodeStorageReport( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(29), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto getPreferredBlockSize( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(30), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto setSafeMode( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(31), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto saveNamespace( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(32), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto rollEdits( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(33), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto restoreFailedStorage( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(34), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto refreshNodes( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(35), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto finalizeUpgrade( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(36), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto upgradeStatus( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(37), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto rollingUpgrade( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto request) throws 
io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(38), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto listCorruptFileBlocks( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(39), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto metaSave( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(40), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto getFileInfo( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(41), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto getLocatedFileInfo( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(42), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto addCacheDirective( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return 
(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(43), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto modifyCacheDirective( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(44), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto removeCacheDirective( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(45), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto listCacheDirectives( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(46), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto addCachePool( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(47), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto modifyCachePool( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return 
(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(48), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto removeCachePool( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(49), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto listCachePools( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(50), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto getFileLinkInfo( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(51), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto getContentSummary( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(52), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto setQuota( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto) channel.callBlockingMethod( 
getDescriptor().getMethods().get(53), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto fsync( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(54), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto setTimes( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(55), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto createSymlink( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(56), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto getLinkTarget( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(57), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto updateBlockForPipeline( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(58), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto.getDefaultInstance()); } public 
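      /*
       * Editor's note (not part of the generated source): every BlockingStub
       * method follows one template -- resolve the RPC's MethodDescriptor by its
       * zero-based index in the service descriptor (the declaration order of the
       * rpc entries in ClientNamenodeProtocol.proto), then delegate to
       * channel.callBlockingMethod(method, controller, request, defaultResponse)
       * and cast the result. A minimal client-side sketch, assuming a connected
       * BlockingRpcChannel named `channel` (hypothetical; wiring one up is
       * outside this file) and with imports/shaded prefixes elided:
       *
       *   ClientNamenodeProtocol.BlockingInterface stub =
       *       ClientNamenodeProtocol.newBlockingStub(channel);  // generated factory
       *   GetFsStatsResponseProto stats = stub.getFsStats(
       *       null,  // RpcController; Hadoop's ProtobufRpcEngine passes null here
       *       GetFsStatusRequestProto.getDefaultInstance());
       *
       * Note the request type really is GetFsStatusRequestProto ("Status", not
       * "Stats"); that asymmetry comes from ClientNamenodeProtocol.proto itself.
       */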
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto updatePipeline( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(59), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.getDefaultInstance()); } public org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto getDelegationToken( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(60), controller, request, org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto.getDefaultInstance()); } public org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto renewDelegationToken( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(61), controller, request, org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto.getDefaultInstance()); } public org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto cancelDelegationToken( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(62), controller, request, org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto setBalancerBandwidth( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(63), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto getDataEncryptionKey( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(64), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto createSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(65), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto renameSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(66), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto allowSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(67), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto disallowSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(68), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto getSnapshottableDirListing( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(69), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto deleteSnapshot( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(70), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto getSnapshotDiffReport( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(71), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto getSnapshotDiffReportListing( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(72), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto isFileClosed( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(73), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesResponseProto modifyAclEntries( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(74), controller, request, org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesResponseProto removeAclEntries( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(75), controller, request, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclResponseProto removeDefaultAcl( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(76), controller, request, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclResponseProto removeAcl( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(77), controller, request, org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclResponseProto setAcl( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(78), controller, request, org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto getAclStatus( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(79), controller, request, org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrResponseProto setXAttr( 
io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(80), controller, request, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto getXAttrs( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(81), controller, request, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsResponseProto listXAttrs( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(82), controller, request, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrResponseProto removeXAttr( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(83), controller, request, org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto checkAccess( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(84), controller, request, org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneResponseProto createEncryptionZone( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(85), controller, request, 
org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto listEncryptionZones( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(86), controller, request, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptEncryptionZoneResponseProto reencryptEncryptionZone( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptEncryptionZoneRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptEncryptionZoneResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(87), controller, request, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptEncryptionZoneResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusResponseProto listReencryptionStatus( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(88), controller, request, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathResponseProto getEZForPath( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(89), controller, request, org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathResponseProto.getDefaultInstance()); } public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyResponseProto setErasureCodingPolicy( io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto request) throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException { return (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyResponseProto) channel.callBlockingMethod( getDescriptor().getMethods().get(90), controller, request, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyResponseProto.getDefaultInstance()); } public 
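      /*
       * Editor's note (not part of the generated source): the message classes for
       * several method families live in separately generated files even though
       * the RPCs belong to this one service: SecurityProtos for the
       * delegation-token calls (indices 60-62), AclProtos for ACLs (74-79),
       * XAttrProtos for extended attributes (80-83), EncryptionZonesProtos for
       * encryption zones (85-89), and ErasureCodingProtos for the erasure-coding
       * calls (90-91 and 94-100).
       */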
      public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyResponseProto unsetErasureCodingPolicy(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(91),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto getCurrentEditLogTxid(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(92),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCurrentEditLogTxidResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto getEditsFromTxid(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(93),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto getErasureCodingPolicies(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(94),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesResponseProto addErasureCodingPolicies(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(95),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesResponseProto.getDefaultInstance());
      }
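      // --- Editorial aside (not part of the generated file) ---
      // getCurrentEditLogTxid/getEditsFromTxid back HDFS inotify: fetch the
      // newest transaction id, then request events from an earlier txid. The
      // getTxid/setTxid accessors follow ClientNamenodeProtocol.proto; verify
      // them against your build before relying on this sketch.
      //
      //   long txid = nn.getCurrentEditLogTxid(null,
      //       org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos
      //           .GetCurrentEditLogTxidRequestProto.getDefaultInstance()).getTxid();
      //   org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto edits =
      //       nn.getEditsFromTxid(null,
      //           org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos
      //               .GetEditsFromTxidRequestProto.newBuilder().setTxid(txid).build());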
      public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyResponseProto removeErasureCodingPolicy(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(96),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.EnableErasureCodingPolicyResponseProto enableErasureCodingPolicy(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.EnableErasureCodingPolicyRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.EnableErasureCodingPolicyResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(97),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.EnableErasureCodingPolicyResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.DisableErasureCodingPolicyResponseProto disableErasureCodingPolicy(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.DisableErasureCodingPolicyRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.DisableErasureCodingPolicyResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(98),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.DisableErasureCodingPolicyResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyResponseProto getErasureCodingPolicy(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(99),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsResponseProto getErasureCodingCodecs(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(100),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsResponseProto.getDefaultInstance());
      }
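      // --- Editorial aside (not part of the generated file) ---
      // The erasure-coding management calls are keyed by policy name.
      // "RS-6-3-1024k" is a stock Hadoop 3 policy used purely as an example,
      // and setEcPolicyName follows erasurecoding.proto; both are assumptions
      // to verify against your build. nn is the stub from the earlier sketch.
      //
      //   nn.enableErasureCodingPolicy(null,
      //       org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos
      //           .EnableErasureCodingPolicyRequestProto.newBuilder()
      //           .setEcPolicyName("RS-6-3-1024k").build());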
      public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto getQuotaUsage(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(101),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto listOpenFiles(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(102),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListOpenFilesResponseProto.getDefaultInstance());
      }

      public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto satisfyStoragePolicy(
          io.prestosql.hadoop.$internal.com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto request)
          throws io.prestosql.hadoop.$internal.com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(103),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto.getDefaultInstance());
      }
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ClientNamenodeProtocol)
  }

  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_GetBlockLocationsRequestProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_GetBlockLocationsRequestProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_GetBlockLocationsResponseProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_GetBlockLocationsResponseProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_GetServerDefaultsRequestProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_GetServerDefaultsRequestProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_GetServerDefaultsResponseProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_GetServerDefaultsResponseProto_fieldAccessorTable;
  private static
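  // --- Editorial aside (not part of the generated file) ---
  // Each message type declared in this file gets a pair of fields below: a
  // Descriptors.Descriptor (the wire schema) and a
  // GeneratedMessage.FieldAccessorTable (the reflection bridge). Both are
  // populated when the embedded file descriptor at the bottom of this class is
  // parsed. A reflective lookup sketch, using a message name from this file:
  //
  //   io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor d =
  //       ClientNamenodeProtocolProtos.getDescriptor()
  //           .findMessageTypeByName("GetQuotaUsageRequestProto");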
io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_CreateRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_CreateRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_CreateResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_CreateResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_AppendRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_AppendRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_AppendResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_AppendResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SetReplicationRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_SetReplicationRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SetReplicationResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_SetReplicationResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SetStoragePolicyRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_SetStoragePolicyRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SetStoragePolicyResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_SetStoragePolicyResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_UnsetStoragePolicyRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_UnsetStoragePolicyRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_UnsetStoragePolicyResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_UnsetStoragePolicyResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetStoragePolicyRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable 
internal_static_hadoop_hdfs_GetStoragePolicyRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetStoragePolicyResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetStoragePolicyResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetStoragePoliciesRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetStoragePoliciesRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetStoragePoliciesResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetStoragePoliciesResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SetPermissionRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_SetPermissionRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SetPermissionResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_SetPermissionResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SetOwnerRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_SetOwnerRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SetOwnerResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_SetOwnerResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_AbandonBlockRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_AbandonBlockRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_AbandonBlockResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_AbandonBlockResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_AddBlockRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_AddBlockRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_AddBlockResponseProto_descriptor; private static 
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_AddBlockResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetAdditionalDatanodeRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetAdditionalDatanodeRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetAdditionalDatanodeResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetAdditionalDatanodeResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_CompleteRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_CompleteRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_CompleteResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_CompleteResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ReportBadBlocksRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_ReportBadBlocksRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ReportBadBlocksResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_ReportBadBlocksResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ConcatRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_ConcatRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ConcatResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_ConcatResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_TruncateRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_TruncateRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_TruncateResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_TruncateResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor 
internal_static_hadoop_hdfs_RenameRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RenameRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RenameResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RenameResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_Rename2RequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_Rename2RequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_Rename2ResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_Rename2ResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DeleteRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_DeleteRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DeleteResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_DeleteResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_MkdirsRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_MkdirsRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_MkdirsResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_MkdirsResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetListingRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetListingRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetListingResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetListingResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetSnapshottableDirListingRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetSnapshottableDirListingRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor 
internal_static_hadoop_hdfs_GetSnapshottableDirListingResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetSnapshottableDirListingResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetSnapshotDiffReportRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetSnapshotDiffReportRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetSnapshotDiffReportResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetSnapshotDiffReportResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetSnapshotDiffReportListingRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetSnapshotDiffReportListingRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetSnapshotDiffReportListingResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetSnapshotDiffReportListingResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RenewLeaseRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RenewLeaseRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RenewLeaseResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RenewLeaseResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RecoverLeaseRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RecoverLeaseRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RecoverLeaseResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RecoverLeaseResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetFsStatusRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetFsStatusRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetFsStatsResponseProto_descriptor; private static 
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetFsStatsResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetFsReplicatedBlockStatsRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetFsReplicatedBlockStatsRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetFsReplicatedBlockStatsResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetFsReplicatedBlockStatsResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetFsECBlockGroupStatsRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetFsECBlockGroupStatsRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetFsECBlockGroupStatsResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetFsECBlockGroupStatsResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetDatanodeReportRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetDatanodeReportRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetDatanodeReportResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetDatanodeReportResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetDatanodeStorageReportRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetDatanodeStorageReportRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_DatanodeStorageReportProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_DatanodeStorageReportProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetDatanodeStorageReportResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetDatanodeStorageReportResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetPreferredBlockSizeRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable 
internal_static_hadoop_hdfs_GetPreferredBlockSizeRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetPreferredBlockSizeResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetPreferredBlockSizeResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SetSafeModeRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_SetSafeModeRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SetSafeModeResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_SetSafeModeResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SaveNamespaceRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_SaveNamespaceRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SaveNamespaceResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_SaveNamespaceResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RollEditsRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RollEditsRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RollEditsResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RollEditsResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RestoreFailedStorageRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RestoreFailedStorageRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RestoreFailedStorageResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RestoreFailedStorageResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RefreshNodesRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RefreshNodesRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RefreshNodesResponseProto_descriptor; 
private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RefreshNodesResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_FinalizeUpgradeRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_FinalizeUpgradeRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_FinalizeUpgradeResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_FinalizeUpgradeResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_UpgradeStatusRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_UpgradeStatusRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_UpgradeStatusResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_UpgradeStatusResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RollingUpgradeRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RollingUpgradeRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RollingUpgradeInfoProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RollingUpgradeInfoProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RollingUpgradeResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RollingUpgradeResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ListCorruptFileBlocksRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_ListCorruptFileBlocksRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ListCorruptFileBlocksResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_ListCorruptFileBlocksResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_MetaSaveRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_MetaSaveRequestProto_fieldAccessorTable; private static 
io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_MetaSaveResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_MetaSaveResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetFileInfoRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetFileInfoRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetFileInfoResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetFileInfoResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetLocatedFileInfoRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetLocatedFileInfoRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetLocatedFileInfoResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetLocatedFileInfoResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_IsFileClosedRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_IsFileClosedRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_IsFileClosedResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_IsFileClosedResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_CacheDirectiveInfoProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_CacheDirectiveInfoProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_CacheDirectiveInfoExpirationProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_CacheDirectiveInfoExpirationProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_CacheDirectiveStatsProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_CacheDirectiveStatsProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_AddCacheDirectiveRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable 
internal_static_hadoop_hdfs_AddCacheDirectiveRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_AddCacheDirectiveResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_AddCacheDirectiveResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ModifyCacheDirectiveRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_ModifyCacheDirectiveRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ModifyCacheDirectiveResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_ModifyCacheDirectiveResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RemoveCacheDirectiveRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RemoveCacheDirectiveRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RemoveCacheDirectiveResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RemoveCacheDirectiveResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ListCacheDirectivesRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_ListCacheDirectivesRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_CacheDirectiveEntryProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_CacheDirectiveEntryProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ListCacheDirectivesResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_ListCacheDirectivesResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_CachePoolInfoProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_CachePoolInfoProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_CachePoolStatsProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_CachePoolStatsProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor 
internal_static_hadoop_hdfs_AddCachePoolRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_AddCachePoolRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_AddCachePoolResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_AddCachePoolResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ModifyCachePoolRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_ModifyCachePoolRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ModifyCachePoolResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_ModifyCachePoolResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RemoveCachePoolRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RemoveCachePoolRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_RemoveCachePoolResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_RemoveCachePoolResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ListCachePoolsRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_ListCachePoolsRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_ListCachePoolsResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_ListCachePoolsResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_CachePoolEntryProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_CachePoolEntryProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetFileLinkInfoRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetFileLinkInfoRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetFileLinkInfoResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetFileLinkInfoResponseProto_fieldAccessorTable; 
private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetContentSummaryRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetContentSummaryRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetContentSummaryResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetContentSummaryResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetQuotaUsageRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetQuotaUsageRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_GetQuotaUsageResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_GetQuotaUsageResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SetQuotaRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_SetQuotaRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SetQuotaResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_SetQuotaResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_FsyncRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_FsyncRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_FsyncResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_FsyncResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SetTimesRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_SetTimesRequestProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_SetTimesResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_SetTimesResponseProto_fieldAccessorTable; private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor internal_static_hadoop_hdfs_CreateSymlinkRequestProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable 
      internal_static_hadoop_hdfs_CreateSymlinkRequestProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_CreateSymlinkResponseProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_CreateSymlinkResponseProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_GetLinkTargetRequestProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_GetLinkTargetRequestProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_GetLinkTargetResponseProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_GetLinkTargetResponseProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_UpdateBlockForPipelineRequestProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_UpdateBlockForPipelineRequestProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_UpdateBlockForPipelineResponseProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_UpdateBlockForPipelineResponseProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_UpdatePipelineRequestProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_UpdatePipelineRequestProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_UpdatePipelineResponseProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_UpdatePipelineResponseProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_SetBalancerBandwidthRequestProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_SetBalancerBandwidthRequestProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_SetBalancerBandwidthResponseProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_SetBalancerBandwidthResponseProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_GetDataEncryptionKeyRequestProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_GetDataEncryptionKeyRequestProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_GetDataEncryptionKeyResponseProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_GetDataEncryptionKeyResponseProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_CreateSnapshotRequestProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_CreateSnapshotRequestProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_CreateSnapshotResponseProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_CreateSnapshotResponseProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_RenameSnapshotRequestProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_RenameSnapshotRequestProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_RenameSnapshotResponseProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_RenameSnapshotResponseProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_AllowSnapshotRequestProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_AllowSnapshotRequestProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_AllowSnapshotResponseProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_AllowSnapshotResponseProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_DisallowSnapshotRequestProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_DisallowSnapshotRequestProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_DisallowSnapshotResponseProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_DisallowSnapshotResponseProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_DeleteSnapshotRequestProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_DeleteSnapshotRequestProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_DeleteSnapshotResponseProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_DeleteSnapshotResponseProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_CheckAccessRequestProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_CheckAccessRequestProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_CheckAccessResponseProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_CheckAccessResponseProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_GetCurrentEditLogTxidRequestProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_GetCurrentEditLogTxidRequestProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_GetCurrentEditLogTxidResponseProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_GetCurrentEditLogTxidResponseProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_GetEditsFromTxidRequestProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_GetEditsFromTxidRequestProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_GetEditsFromTxidResponseProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_GetEditsFromTxidResponseProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ListOpenFilesRequestProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_ListOpenFilesRequestProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_OpenFilesBatchResponseProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_OpenFilesBatchResponseProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ListOpenFilesResponseProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_ListOpenFilesResponseProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_SatisfyStoragePolicyRequestProto_descriptor;
  private static
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_SatisfyStoragePolicyRequestProto_fieldAccessorTable;
  private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_SatisfyStoragePolicyResponseProto_descriptor; private static io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hadoop_hdfs_SatisfyStoragePolicyResponseProto_fieldAccessorTable; public static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; } private static io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FileDescriptor descriptor; static { java.lang.String[] descriptorData = { "\n\034ClientNamenodeProtocol.proto\022\013hadoop.h" + "dfs\032\016Security.proto\032\nhdfs.proto\032\tacl.pro" + "to\032\013xattr.proto\032\020encryption.proto\032\rinoti" + "fy.proto\032\023erasurecoding.proto\"L\n\035GetBloc" + "kLocationsRequestProto\022\013\n\003src\030\001 \002(\t\022\016\n\006o" + "ffset\030\002 \002(\004\022\016\n\006length\030\003 \002(\004\"T\n\036GetBlockL" + "ocationsResponseProto\0222\n\tlocations\030\001 \001(\013" + "2\037.hadoop.hdfs.LocatedBlocksProto\"\037\n\035Get" + "ServerDefaultsRequestProto\"\\\n\036GetServerD" + "efaultsResponseProto\022:\n\016serverDefaults\030\001", " \002(\0132\".hadoop.hdfs.FsServerDefaultsProto" + "\"\307\002\n\022CreateRequestProto\022\013\n\003src\030\001 \002(\t\022.\n\006" + "masked\030\002 \002(\0132\036.hadoop.hdfs.FsPermissionP" + "roto\022\022\n\nclientName\030\003 \002(\t\022\022\n\ncreateFlag\030\004" + " \002(\r\022\024\n\014createParent\030\005 \002(\010\022\023\n\013replicatio" + "n\030\006 \002(\r\022\021\n\tblockSize\030\007 \002(\004\022F\n\025cryptoProt" + "ocolVersion\030\010 \003(\0162\'.hadoop.hdfs.CryptoPr" + "otocolVersionProto\0220\n\010unmasked\030\t \001(\0132\036.h" + "adoop.hdfs.FsPermissionProto\022\024\n\014ecPolicy" + "Name\030\n \001(\t\"C\n\023CreateResponseProto\022,\n\002fs\030", "\001 \001(\0132 .hadoop.hdfs.HdfsFileStatusProto\"" + "C\n\022AppendRequestProto\022\013\n\003src\030\001 \002(\t\022\022\n\ncl" + "ientName\030\002 \002(\t\022\014\n\004flag\030\003 \001(\r\"t\n\023AppendRe" + "sponseProto\022-\n\005block\030\001 \001(\0132\036.hadoop.hdfs" + ".LocatedBlockProto\022.\n\004stat\030\002 \001(\0132 .hadoo" + "p.hdfs.HdfsFileStatusProto\">\n\032SetReplica" + "tionRequestProto\022\013\n\003src\030\001 \002(\t\022\023\n\013replica" + "tion\030\002 \002(\r\"-\n\033SetReplicationResponseProt" + "o\022\016\n\006result\030\001 \002(\010\"?\n\034SetStoragePolicyReq" + "uestProto\022\013\n\003src\030\001 \002(\t\022\022\n\npolicyName\030\002 \002", "(\t\"\037\n\035SetStoragePolicyResponseProto\"-\n\036U" + "nsetStoragePolicyRequestProto\022\013\n\003src\030\001 \002" + "(\t\"!\n\037UnsetStoragePolicyResponseProto\",\n" + "\034GetStoragePolicyRequestProto\022\014\n\004path\030\001 " + "\002(\t\"\\\n\035GetStoragePolicyResponseProto\022;\n\r" + "storagePolicy\030\001 \002(\0132$.hadoop.hdfs.BlockS" + "toragePolicyProto\" \n\036GetStoragePoliciesR" + "equestProto\"Y\n\037GetStoragePoliciesRespons" + "eProto\0226\n\010policies\030\001 \003(\0132$.hadoop.hdfs.B" + "lockStoragePolicyProto\"\\\n\031SetPermissionR", "equestProto\022\013\n\003src\030\001 \002(\t\0222\n\npermission\030\002" + " \002(\0132\036.hadoop.hdfs.FsPermissionProto\"\034\n\032" + "SetPermissionResponseProto\"H\n\024SetOwnerRe" + "questProto\022\013\n\003src\030\001 \002(\t\022\020\n\010username\030\002 \001(" + "\t\022\021\n\tgroupname\030\003 \001(\t\"\027\n\025SetOwnerResponse" + "Proto\"v\n\030AbandonBlockRequestProto\022*\n\001b\030\001" + " \002(\0132\037.hadoop.hdfs.ExtendedBlockProto\022\013\n" + 
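      // descriptorData is the serialized FileDescriptorProto of
      // ClientNamenodeProtocol.proto, emitted by protoc as octal-escaped string
      // literals and split into several array elements so that no single
      // compile-time string constant exceeds the class-file limit of 65535
      // bytes. The bytes follow the descriptor.proto wire format: for example,
      // "\n\034ClientNamenodeProtocol.proto" is field 1 (name, 28 bytes),
      // "\022\013hadoop.hdfs" is field 2 (package, 11 bytes), and every chunk
      // introduced by '\"' (0x22, field 4, message_type) is the DescriptorProto
      // of one of the request/response messages in this file.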
"\003src\030\002 \002(\t\022\016\n\006holder\030\003 \002(\t\022\021\n\006fileId\030\004 \001" + "(\004:\0010\"\033\n\031AbandonBlockResponseProto\"\370\001\n\024A" + "ddBlockRequestProto\022\013\n\003src\030\001 \002(\t\022\022\n\nclie", "ntName\030\002 \002(\t\0221\n\010previous\030\003 \001(\0132\037.hadoop." + "hdfs.ExtendedBlockProto\0224\n\014excludeNodes\030" + "\004 \003(\0132\036.hadoop.hdfs.DatanodeInfoProto\022\021\n" + "\006fileId\030\005 \001(\004:\0010\022\024\n\014favoredNodes\030\006 \003(\t\022-" + "\n\005flags\030\007 \003(\0162\036.hadoop.hdfs.AddBlockFlag" + "Proto\"F\n\025AddBlockResponseProto\022-\n\005block\030" + "\001 \002(\0132\036.hadoop.hdfs.LocatedBlockProto\"\244\002" + "\n!GetAdditionalDatanodeRequestProto\022\013\n\003s" + "rc\030\001 \002(\t\022,\n\003blk\030\002 \002(\0132\037.hadoop.hdfs.Exte" + "ndedBlockProto\0221\n\texistings\030\003 \003(\0132\036.hado", "op.hdfs.DatanodeInfoProto\0220\n\010excludes\030\004 " + "\003(\0132\036.hadoop.hdfs.DatanodeInfoProto\022\032\n\022n" + "umAdditionalNodes\030\005 \002(\r\022\022\n\nclientName\030\006 " + "\002(\t\022\034\n\024existingStorageUuids\030\007 \003(\t\022\021\n\006fil" + "eId\030\010 \001(\004:\0010\"S\n\"GetAdditionalDatanodeRes" + "ponseProto\022-\n\005block\030\001 \002(\0132\036.hadoop.hdfs." + "LocatedBlockProto\"y\n\024CompleteRequestProt" + "o\022\013\n\003src\030\001 \002(\t\022\022\n\nclientName\030\002 \002(\t\022-\n\004la" + "st\030\003 \001(\0132\037.hadoop.hdfs.ExtendedBlockProt" + "o\022\021\n\006fileId\030\004 \001(\004:\0010\"\'\n\025CompleteResponse", "Proto\022\016\n\006result\030\001 \002(\010\"M\n\033ReportBadBlocks" + "RequestProto\022.\n\006blocks\030\001 \003(\0132\036.hadoop.hd" + "fs.LocatedBlockProto\"\036\n\034ReportBadBlocksR" + "esponseProto\"/\n\022ConcatRequestProto\022\013\n\003tr" + "g\030\001 \002(\t\022\014\n\004srcs\030\002 \003(\t\"\025\n\023ConcatResponseP" + "roto\"J\n\024TruncateRequestProto\022\013\n\003src\030\001 \002(" + "\t\022\021\n\tnewLength\030\002 \002(\004\022\022\n\nclientName\030\003 \002(\t" + "\"\'\n\025TruncateResponseProto\022\016\n\006result\030\001 \002(" + "\010\".\n\022RenameRequestProto\022\013\n\003src\030\001 \002(\t\022\013\n\003" + "dst\030\002 \002(\t\"%\n\023RenameResponseProto\022\016\n\006resu", "lt\030\001 \002(\010\"[\n\023Rename2RequestProto\022\013\n\003src\030\001" + " \002(\t\022\013\n\003dst\030\002 \002(\t\022\025\n\roverwriteDest\030\003 \002(\010" + "\022\023\n\013moveToTrash\030\004 \001(\010\"\026\n\024Rename2Response" + "Proto\"4\n\022DeleteRequestProto\022\013\n\003src\030\001 \002(\t" + "\022\021\n\trecursive\030\002 \002(\010\"%\n\023DeleteResponsePro" + "to\022\016\n\006result\030\001 \002(\010\"\231\001\n\022MkdirsRequestProt" + "o\022\013\n\003src\030\001 \002(\t\022.\n\006masked\030\002 \002(\0132\036.hadoop." 
+ "hdfs.FsPermissionProto\022\024\n\014createParent\030\003" + " \002(\010\0220\n\010unmasked\030\004 \001(\0132\036.hadoop.hdfs.FsP" + "ermissionProto\"%\n\023MkdirsResponseProto\022\016\n", "\006result\030\001 \002(\010\"O\n\026GetListingRequestProto\022" + "\013\n\003src\030\001 \002(\t\022\022\n\nstartAfter\030\002 \002(\014\022\024\n\014need" + "Location\030\003 \002(\010\"N\n\027GetListingResponseProt" + "o\0223\n\007dirList\030\001 \001(\0132\".hadoop.hdfs.Directo" + "ryListingProto\"(\n&GetSnapshottableDirLis" + "tingRequestProto\"x\n\'GetSnapshottableDirL" + "istingResponseProto\022M\n\024snapshottableDirL" + "ist\030\001 \001(\0132/.hadoop.hdfs.SnapshottableDir" + "ectoryListingProto\"c\n!GetSnapshotDiffRep" + "ortRequestProto\022\024\n\014snapshotRoot\030\001 \002(\t\022\024\n", "\014fromSnapshot\030\002 \002(\t\022\022\n\ntoSnapshot\030\003 \002(\t\"" + "^\n\"GetSnapshotDiffReportResponseProto\0228\n" + "\ndiffReport\030\001 \002(\0132$.hadoop.hdfs.Snapshot" + "DiffReportProto\"\246\001\n(GetSnapshotDiffRepor" + "tListingRequestProto\022\024\n\014snapshotRoot\030\001 \002" + "(\t\022\024\n\014fromSnapshot\030\002 \002(\t\022\022\n\ntoSnapshot\030\003" + " \002(\t\022:\n\006cursor\030\004 \001(\0132*.hadoop.hdfs.Snaps" + "hotDiffReportCursorProto\"l\n)GetSnapshotD" + "iffReportListingResponseProto\022?\n\ndiffRep" + "ort\030\001 \002(\0132+.hadoop.hdfs.SnapshotDiffRepo", "rtListingProto\",\n\026RenewLeaseRequestProto" + "\022\022\n\nclientName\030\001 \002(\t\"\031\n\027RenewLeaseRespon" + "seProto\";\n\030RecoverLeaseRequestProto\022\013\n\003s" + "rc\030\001 \002(\t\022\022\n\nclientName\030\002 \002(\t\"+\n\031RecoverL" + "easeResponseProto\022\016\n\006result\030\001 \002(\010\"\031\n\027Get" + "FsStatusRequestProto\"\362\001\n\027GetFsStatsRespo" + "nseProto\022\020\n\010capacity\030\001 \002(\004\022\014\n\004used\030\002 \002(\004" + "\022\021\n\tremaining\030\003 \002(\004\022\030\n\020under_replicated\030" + "\004 \002(\004\022\026\n\016corrupt_blocks\030\005 \002(\004\022\026\n\016missing" + "_blocks\030\006 \002(\004\022\037\n\027missing_repl_one_blocks", "\030\007 \001(\004\022\030\n\020blocks_in_future\030\010 \001(\004\022\037\n\027pend" + "ing_deletion_blocks\030\t \001(\004\"\'\n%GetFsReplic" + "atedBlockStatsRequestProto\"\370\001\n&GetFsRepl" + "icatedBlockStatsResponseProto\022\026\n\016low_red" + "undancy\030\001 \002(\004\022\026\n\016corrupt_blocks\030\002 \002(\004\022\026\n" + "\016missing_blocks\030\003 \002(\004\022\037\n\027missing_repl_on" + "e_blocks\030\004 \002(\004\022\030\n\020blocks_in_future\030\005 \002(\004" + "\022\037\n\027pending_deletion_blocks\030\006 \002(\004\022*\n\"hig" + "hest_prio_low_redundancy_blocks\030\007 \001(\004\"$\n" + "\"GetFsECBlockGroupStatsRequestProto\"\324\001\n#", "GetFsECBlockGroupStatsResponseProto\022\026\n\016l" + "ow_redundancy\030\001 \002(\004\022\026\n\016corrupt_blocks\030\002 " + "\002(\004\022\026\n\016missing_blocks\030\003 \002(\004\022\030\n\020blocks_in" + "_future\030\004 \002(\004\022\037\n\027pending_deletion_blocks" + "\030\005 \002(\004\022*\n\"highest_prio_low_redundancy_bl" + "ocks\030\006 \001(\004\"S\n\035GetDatanodeReportRequestPr" + "oto\0222\n\004type\030\001 \002(\0162$.hadoop.hdfs.Datanode" + "ReportTypeProto\"L\n\036GetDatanodeReportResp" + "onseProto\022*\n\002di\030\001 \003(\0132\036.hadoop.hdfs.Data" + "nodeInfoProto\"Z\n$GetDatanodeStorageRepor", "tRequestProto\0222\n\004type\030\001 \002(\0162$.hadoop.hdf" + "s.DatanodeReportTypeProto\"\213\001\n\032DatanodeSt" + 
"orageReportProto\0224\n\014datanodeInfo\030\001 \002(\0132\036" + ".hadoop.hdfs.DatanodeInfoProto\0227\n\016storag" + "eReports\030\002 \003(\0132\037.hadoop.hdfs.StorageRepo" + "rtProto\"p\n%GetDatanodeStorageReportRespo" + "nseProto\022G\n\026datanodeStorageReports\030\001 \003(\013" + "2\'.hadoop.hdfs.DatanodeStorageReportProt" + "o\"5\n!GetPreferredBlockSizeRequestProto\022\020" + "\n\010filename\030\001 \002(\t\"3\n\"GetPreferredBlockSiz", "eResponseProto\022\r\n\005bsize\030\001 \002(\004\"c\n\027SetSafe" + "ModeRequestProto\0220\n\006action\030\001 \002(\0162 .hadoo" + "p.hdfs.SafeModeActionProto\022\026\n\007checked\030\002 " + "\001(\010:\005false\"*\n\030SetSafeModeResponseProto\022\016" + "\n\006result\030\001 \002(\010\"D\n\031SaveNamespaceRequestPr" + "oto\022\025\n\ntimeWindow\030\001 \001(\004:\0010\022\020\n\005txGap\030\002 \001(" + "\004:\0010\"1\n\032SaveNamespaceResponseProto\022\023\n\005sa" + "ved\030\001 \001(\010:\004true\"\027\n\025RollEditsRequestProto" + "\"0\n\026RollEditsResponseProto\022\026\n\016newSegment" + "TxId\030\001 \002(\004\"/\n RestoreFailedStorageReques", "tProto\022\013\n\003arg\030\001 \002(\t\"3\n!RestoreFailedStor" + "ageResponseProto\022\016\n\006result\030\001 \002(\010\"\032\n\030Refr" + "eshNodesRequestProto\"\033\n\031RefreshNodesResp" + "onseProto\"\035\n\033FinalizeUpgradeRequestProto" + "\"\036\n\034FinalizeUpgradeResponseProto\"\033\n\031Upgr" + "adeStatusRequestProto\"6\n\032UpgradeStatusRe" + "sponseProto\022\030\n\020upgradeFinalized\030\001 \002(\010\"T\n" + "\032RollingUpgradeRequestProto\0226\n\006action\030\001 " + "\002(\0162&.hadoop.hdfs.RollingUpgradeActionPr" + "oto\"\231\001\n\027RollingUpgradeInfoProto\0226\n\006statu", "s\030\001 \002(\0132&.hadoop.hdfs.RollingUpgradeStat" + "usProto\022\021\n\tstartTime\030\002 \002(\004\022\024\n\014finalizeTi" + "me\030\003 \002(\004\022\035\n\025createdRollbackImages\030\004 \002(\010\"" + "_\n\033RollingUpgradeResponseProto\022@\n\022rollin" + "gUpgradeInfo\030\001 \001(\0132$.hadoop.hdfs.Rolling" + "UpgradeInfoProto\"A\n!ListCorruptFileBlock" + "sRequestProto\022\014\n\004path\030\001 \002(\t\022\016\n\006cookie\030\002 " + "\001(\t\"Z\n\"ListCorruptFileBlocksResponseProt" + "o\0224\n\007corrupt\030\001 \002(\0132#.hadoop.hdfs.Corrupt" + "FileBlocksProto\"(\n\024MetaSaveRequestProto\022", "\020\n\010filename\030\001 \002(\t\"\027\n\025MetaSaveResponsePro" + "to\"&\n\027GetFileInfoRequestProto\022\013\n\003src\030\001 \002" + "(\t\"H\n\030GetFileInfoResponseProto\022,\n\002fs\030\001 \001" + "(\0132 .hadoop.hdfs.HdfsFileStatusProto\"L\n\036" + "GetLocatedFileInfoRequestProto\022\013\n\003src\030\001 " + "\001(\t\022\035\n\016needBlockToken\030\002 \001(\010:\005false\"O\n\037Ge" + "tLocatedFileInfoResponseProto\022,\n\002fs\030\001 \001(" + "\0132 .hadoop.hdfs.HdfsFileStatusProto\"\'\n\030I" + "sFileClosedRequestProto\022\013\n\003src\030\001 \002(\t\"+\n\031" + "IsFileClosedResponseProto\022\016\n\006result\030\001 \002(", "\010\"\232\001\n\027CacheDirectiveInfoProto\022\n\n\002id\030\001 \001(" + "\003\022\014\n\004path\030\002 \001(\t\022\023\n\013replication\030\003 \001(\r\022\014\n\004" + "pool\030\004 \001(\t\022B\n\nexpiration\030\005 \001(\0132..hadoop." 
+ "hdfs.CacheDirectiveInfoExpirationProto\"G" + "\n!CacheDirectiveInfoExpirationProto\022\016\n\006m" + "illis\030\001 \002(\003\022\022\n\nisRelative\030\002 \002(\010\"\202\001\n\030Cach" + "eDirectiveStatsProto\022\023\n\013bytesNeeded\030\001 \002(" + "\003\022\023\n\013bytesCached\030\002 \002(\003\022\023\n\013filesNeeded\030\003 " + "\002(\003\022\023\n\013filesCached\030\004 \002(\003\022\022\n\nhasExpired\030\005" + " \002(\010\"g\n\035AddCacheDirectiveRequestProto\0222\n", "\004info\030\001 \002(\0132$.hadoop.hdfs.CacheDirective" + "InfoProto\022\022\n\ncacheFlags\030\002 \001(\r\",\n\036AddCach" + "eDirectiveResponseProto\022\n\n\002id\030\001 \002(\003\"j\n M" + "odifyCacheDirectiveRequestProto\0222\n\004info\030" + "\001 \002(\0132$.hadoop.hdfs.CacheDirectiveInfoPr" + "oto\022\022\n\ncacheFlags\030\002 \001(\r\"#\n!ModifyCacheDi" + "rectiveResponseProto\".\n RemoveCacheDirec" + "tiveRequestProto\022\n\n\002id\030\001 \002(\003\"#\n!RemoveCa" + "cheDirectiveResponseProto\"g\n\037ListCacheDi" + "rectivesRequestProto\022\016\n\006prevId\030\001 \002(\003\0224\n\006", "filter\030\002 \002(\0132$.hadoop.hdfs.CacheDirectiv" + "eInfoProto\"\204\001\n\030CacheDirectiveEntryProto\022" + "2\n\004info\030\001 \002(\0132$.hadoop.hdfs.CacheDirecti" + "veInfoProto\0224\n\005stats\030\002 \002(\0132%.hadoop.hdfs" + ".CacheDirectiveStatsProto\"l\n ListCacheDi" + "rectivesResponseProto\0227\n\010elements\030\001 \003(\0132" + "%.hadoop.hdfs.CacheDirectiveEntryProto\022\017" + "\n\007hasMore\030\002 \002(\010\"\243\001\n\022CachePoolInfoProto\022\020" + "\n\010poolName\030\001 \001(\t\022\021\n\townerName\030\002 \001(\t\022\021\n\tg" + "roupName\030\003 \001(\t\022\014\n\004mode\030\004 \001(\005\022\r\n\005limit\030\005 ", "\001(\003\022\031\n\021maxRelativeExpiry\030\006 \001(\003\022\035\n\022defaul" + "tReplication\030\007 \001(\r:\0011\"\201\001\n\023CachePoolStats" + "Proto\022\023\n\013bytesNeeded\030\001 \002(\003\022\023\n\013bytesCache" + "d\030\002 \002(\003\022\026\n\016bytesOverlimit\030\003 \002(\003\022\023\n\013files" + "Needed\030\004 \002(\003\022\023\n\013filesCached\030\005 \002(\003\"I\n\030Add" + "CachePoolRequestProto\022-\n\004info\030\001 \002(\0132\037.ha" + "doop.hdfs.CachePoolInfoProto\"\033\n\031AddCache" + "PoolResponseProto\"L\n\033ModifyCachePoolRequ" + "estProto\022-\n\004info\030\001 \002(\0132\037.hadoop.hdfs.Cac" + "hePoolInfoProto\"\036\n\034ModifyCachePoolRespon", "seProto\"/\n\033RemoveCachePoolRequestProto\022\020" + "\n\010poolName\030\001 \002(\t\"\036\n\034RemoveCachePoolRespo" + "nseProto\"2\n\032ListCachePoolsRequestProto\022\024" + "\n\014prevPoolName\030\001 \002(\t\"a\n\033ListCachePoolsRe" + "sponseProto\0221\n\007entries\030\001 \003(\0132 .hadoop.hd" + "fs.CachePoolEntryProto\022\017\n\007hasMore\030\002 \002(\010\"" + "u\n\023CachePoolEntryProto\022-\n\004info\030\001 \002(\0132\037.h" + "adoop.hdfs.CachePoolInfoProto\022/\n\005stats\030\002" + " \002(\0132 .hadoop.hdfs.CachePoolStatsProto\"*" + "\n\033GetFileLinkInfoRequestProto\022\013\n\003src\030\001 \002", "(\t\"L\n\034GetFileLinkInfoResponseProto\022,\n\002fs" + "\030\001 \001(\0132 .hadoop.hdfs.HdfsFileStatusProto" + "\"-\n\035GetContentSummaryRequestProto\022\014\n\004pat" + "h\030\001 \002(\t\"S\n\036GetContentSummaryResponseProt" + "o\0221\n\007summary\030\001 \002(\0132 .hadoop.hdfs.Content" + "SummaryProto\")\n\031GetQuotaUsageRequestProt" + "o\022\014\n\004path\030\001 \002(\t\"I\n\032GetQuotaUsageResponse" + "Proto\022+\n\005usage\030\001 \002(\0132\034.hadoop.hdfs.Quota" + 
"UsageProto\"\213\001\n\024SetQuotaRequestProto\022\014\n\004p" + "ath\030\001 \002(\t\022\026\n\016namespaceQuota\030\002 \002(\004\022\031\n\021sto", "ragespaceQuota\030\003 \002(\004\0222\n\013storageType\030\004 \001(" + "\0162\035.hadoop.hdfs.StorageTypeProto\"\027\n\025SetQ" + "uotaResponseProto\"`\n\021FsyncRequestProto\022\013" + "\n\003src\030\001 \002(\t\022\016\n\006client\030\002 \002(\t\022\033\n\017lastBlock" + "Length\030\003 \001(\022:\002-1\022\021\n\006fileId\030\004 \001(\004:\0010\"\024\n\022F" + "syncResponseProto\"A\n\024SetTimesRequestProt" + "o\022\013\n\003src\030\001 \002(\t\022\r\n\005mtime\030\002 \002(\004\022\r\n\005atime\030\003" + " \002(\004\"\027\n\025SetTimesResponseProto\"\200\001\n\031Create" + "SymlinkRequestProto\022\016\n\006target\030\001 \002(\t\022\014\n\004l" + "ink\030\002 \002(\t\022/\n\007dirPerm\030\003 \002(\0132\036.hadoop.hdfs", ".FsPermissionProto\022\024\n\014createParent\030\004 \002(\010" + "\"\034\n\032CreateSymlinkResponseProto\")\n\031GetLin" + "kTargetRequestProto\022\014\n\004path\030\001 \002(\t\"0\n\032Get" + "LinkTargetResponseProto\022\022\n\ntargetPath\030\001 " + "\001(\t\"h\n\"UpdateBlockForPipelineRequestProt" + "o\022.\n\005block\030\001 \002(\0132\037.hadoop.hdfs.ExtendedB" + "lockProto\022\022\n\nclientName\030\002 \002(\t\"T\n#UpdateB" + "lockForPipelineResponseProto\022-\n\005block\030\001 " + "\002(\0132\036.hadoop.hdfs.LocatedBlockProto\"\332\001\n\032" + "UpdatePipelineRequestProto\022\022\n\nclientName", "\030\001 \002(\t\0221\n\010oldBlock\030\002 \002(\0132\037.hadoop.hdfs.E" + "xtendedBlockProto\0221\n\010newBlock\030\003 \002(\0132\037.ha" + "doop.hdfs.ExtendedBlockProto\022.\n\010newNodes" + "\030\004 \003(\0132\034.hadoop.hdfs.DatanodeIDProto\022\022\n\n" + "storageIDs\030\005 \003(\t\"\035\n\033UpdatePipelineRespon" + "seProto\"5\n SetBalancerBandwidthRequestPr" + "oto\022\021\n\tbandwidth\030\001 \002(\003\"#\n!SetBalancerBan" + "dwidthResponseProto\"\"\n GetDataEncryption" + "KeyRequestProto\"c\n!GetDataEncryptionKeyR" + "esponseProto\022>\n\021dataEncryptionKey\030\001 \001(\0132", "#.hadoop.hdfs.DataEncryptionKeyProto\"H\n\032" + "CreateSnapshotRequestProto\022\024\n\014snapshotRo" + "ot\030\001 \002(\t\022\024\n\014snapshotName\030\002 \001(\t\"3\n\033Create" + "SnapshotResponseProto\022\024\n\014snapshotPath\030\001 " + "\002(\t\"d\n\032RenameSnapshotRequestProto\022\024\n\014sna" + "pshotRoot\030\001 \002(\t\022\027\n\017snapshotOldName\030\002 \002(\t" + "\022\027\n\017snapshotNewName\030\003 \002(\t\"\035\n\033RenameSnaps" + "hotResponseProto\"1\n\031AllowSnapshotRequest" + "Proto\022\024\n\014snapshotRoot\030\001 \002(\t\"\034\n\032AllowSnap" + "shotResponseProto\"4\n\034DisallowSnapshotReq", "uestProto\022\024\n\014snapshotRoot\030\001 \002(\t\"\037\n\035Disal" + "lowSnapshotResponseProto\"H\n\032DeleteSnapsh" + "otRequestProto\022\024\n\014snapshotRoot\030\001 \002(\t\022\024\n\014" + "snapshotName\030\002 \002(\t\"\035\n\033DeleteSnapshotResp" + "onseProto\"_\n\027CheckAccessRequestProto\022\014\n\004" + "path\030\001 \002(\t\0226\n\004mode\030\002 \002(\0162(.hadoop.hdfs.A" + "clEntryProto.FsActionProto\"\032\n\030CheckAcces" + "sResponseProto\"#\n!GetCurrentEditLogTxidR" + "equestProto\"2\n\"GetCurrentEditLogTxidResp" + "onseProto\022\014\n\004txid\030\001 \002(\003\",\n\034GetEditsFromT", "xidRequestProto\022\014\n\004txid\030\001 \002(\003\"Q\n\035GetEdit" + "sFromTxidResponseProto\0220\n\neventsList\030\001 \002" + "(\0132\034.hadoop.hdfs.EventsListProto\"e\n\031List" + 
"OpenFilesRequestProto\022\n\n\002id\030\001 \002(\003\022.\n\005typ" + "es\030\002 \003(\0162\037.hadoop.hdfs.OpenFilesTypeProt" + "o\022\014\n\004path\030\003 \001(\t\"b\n\033OpenFilesBatchRespons" + "eProto\022\n\n\002id\030\001 \002(\003\022\014\n\004path\030\002 \002(\t\022\022\n\nclie" + "ntName\030\003 \002(\t\022\025\n\rclientMachine\030\004 \002(\t\"\230\001\n\032" + "ListOpenFilesResponseProto\0229\n\007entries\030\001 " + "\003(\0132(.hadoop.hdfs.OpenFilesBatchResponse", "Proto\022\017\n\007hasMore\030\002 \002(\010\022.\n\005types\030\003 \003(\0162\037." + "hadoop.hdfs.OpenFilesTypeProto\"/\n Satisf" + "yStoragePolicyRequestProto\022\013\n\003src\030\001 \002(\t\"" + "#\n!SatisfyStoragePolicyResponseProto*p\n\017" + "CreateFlagProto\022\n\n\006CREATE\020\001\022\r\n\tOVERWRITE" + "\020\002\022\n\n\006APPEND\020\004\022\020\n\014LAZY_PERSIST\020\020\022\r\n\tNEW_" + "BLOCK\020 \022\025\n\020SHOULD_REPLICATE\020\200\001*C\n\021AddBlo" + "ckFlagProto\022\022\n\016NO_LOCAL_WRITE\020\001\022\032\n\026IGNOR" + "E_CLIENT_LOCALITY\020\002*y\n\027DatanodeReportTyp" + "eProto\022\007\n\003ALL\020\001\022\010\n\004LIVE\020\002\022\010\n\004DEAD\020\003\022\023\n\017D", "ECOMMISSIONING\020\004\022\030\n\024ENTERING_MAINTENANCE" + "\020\005\022\022\n\016IN_MAINTENANCE\020\006*h\n\023SafeModeAction" + "Proto\022\022\n\016SAFEMODE_LEAVE\020\001\022\022\n\016SAFEMODE_EN" + "TER\020\002\022\020\n\014SAFEMODE_GET\020\003\022\027\n\023SAFEMODE_FORC" + "E_EXIT\020\004*?\n\031RollingUpgradeActionProto\022\t\n" + "\005QUERY\020\001\022\t\n\005START\020\002\022\014\n\010FINALIZE\020\003*\033\n\016Cac" + "heFlagProto\022\t\n\005FORCE\020\001*C\n\022OpenFilesTypeP" + "roto\022\022\n\016ALL_OPEN_FILES\020\001\022\031\n\025BLOCKING_DEC" + "OMMISSION\020\0022\306U\n\026ClientNamenodeProtocol\022l" + "\n\021getBlockLocations\022*.hadoop.hdfs.GetBlo", "ckLocationsRequestProto\032+.hadoop.hdfs.Ge" + "tBlockLocationsResponseProto\022l\n\021getServe" + "rDefaults\022*.hadoop.hdfs.GetServerDefault" + "sRequestProto\032+.hadoop.hdfs.GetServerDef" + "aultsResponseProto\022K\n\006create\022\037.hadoop.hd" + "fs.CreateRequestProto\032 .hadoop.hdfs.Crea" + "teResponseProto\022K\n\006append\022\037.hadoop.hdfs." + "AppendRequestProto\032 .hadoop.hdfs.AppendR" + "esponseProto\022c\n\016setReplication\022\'.hadoop." + "hdfs.SetReplicationRequestProto\032(.hadoop", ".hdfs.SetReplicationResponseProto\022i\n\020set" + "StoragePolicy\022).hadoop.hdfs.SetStoragePo" + "licyRequestProto\032*.hadoop.hdfs.SetStorag" + "ePolicyResponseProto\022o\n\022unsetStoragePoli" + "cy\022+.hadoop.hdfs.UnsetStoragePolicyReque" + "stProto\032,.hadoop.hdfs.UnsetStoragePolicy" + "ResponseProto\022i\n\020getStoragePolicy\022).hado" + "op.hdfs.GetStoragePolicyRequestProto\032*.h" + "adoop.hdfs.GetStoragePolicyResponseProto" + "\022o\n\022getStoragePolicies\022+.hadoop.hdfs.Get", "StoragePoliciesRequestProto\032,.hadoop.hdf" + "s.GetStoragePoliciesResponseProto\022`\n\rset" + "Permission\022&.hadoop.hdfs.SetPermissionRe" + "questProto\032\'.hadoop.hdfs.SetPermissionRe" + "sponseProto\022Q\n\010setOwner\022!.hadoop.hdfs.Se" + "tOwnerRequestProto\032\".hadoop.hdfs.SetOwne" + "rResponseProto\022]\n\014abandonBlock\022%.hadoop." + "hdfs.AbandonBlockRequestProto\032&.hadoop.h" + "dfs.AbandonBlockResponseProto\022Q\n\010addBloc" + "k\022!.hadoop.hdfs.AddBlockRequestProto\032\".h", "adoop.hdfs.AddBlockResponseProto\022x\n\025getA" + "dditionalDatanode\022..hadoop.hdfs.GetAddit" + "ionalDatanodeRequestProto\032/.hadoop.hdfs." 
+ "GetAdditionalDatanodeResponseProto\022Q\n\010co" + "mplete\022!.hadoop.hdfs.CompleteRequestProt" + "o\032\".hadoop.hdfs.CompleteResponseProto\022f\n" + "\017reportBadBlocks\022(.hadoop.hdfs.ReportBad" + "BlocksRequestProto\032).hadoop.hdfs.ReportB" + "adBlocksResponseProto\022K\n\006concat\022\037.hadoop" + ".hdfs.ConcatRequestProto\032 .hadoop.hdfs.C", "oncatResponseProto\022Q\n\010truncate\022!.hadoop." + "hdfs.TruncateRequestProto\032\".hadoop.hdfs." + "TruncateResponseProto\022K\n\006rename\022\037.hadoop" + ".hdfs.RenameRequestProto\032 .hadoop.hdfs.R" + "enameResponseProto\022N\n\007rename2\022 .hadoop.h" + "dfs.Rename2RequestProto\032!.hadoop.hdfs.Re" + "name2ResponseProto\022K\n\006delete\022\037.hadoop.hd" + "fs.DeleteRequestProto\032 .hadoop.hdfs.Dele" + "teResponseProto\022K\n\006mkdirs\022\037.hadoop.hdfs." + "MkdirsRequestProto\032 .hadoop.hdfs.MkdirsR", "esponseProto\022W\n\ngetListing\022#.hadoop.hdfs" + ".GetListingRequestProto\032$.hadoop.hdfs.Ge" + "tListingResponseProto\022W\n\nrenewLease\022#.ha" + "doop.hdfs.RenewLeaseRequestProto\032$.hadoo" + "p.hdfs.RenewLeaseResponseProto\022]\n\014recove" + "rLease\022%.hadoop.hdfs.RecoverLeaseRequest" + "Proto\032&.hadoop.hdfs.RecoverLeaseResponse" + "Proto\022X\n\ngetFsStats\022$.hadoop.hdfs.GetFsS" + "tatusRequestProto\032$.hadoop.hdfs.GetFsSta" + "tsResponseProto\022\204\001\n\031getFsReplicatedBlock", "Stats\0222.hadoop.hdfs.GetFsReplicatedBlock" + "StatsRequestProto\0323.hadoop.hdfs.GetFsRep" + "licatedBlockStatsResponseProto\022{\n\026getFsE" + "CBlockGroupStats\022/.hadoop.hdfs.GetFsECBl" + "ockGroupStatsRequestProto\0320.hadoop.hdfs." + "GetFsECBlockGroupStatsResponseProto\022l\n\021g" + "etDatanodeReport\022*.hadoop.hdfs.GetDatano" + "deReportRequestProto\032+.hadoop.hdfs.GetDa" + "tanodeReportResponseProto\022\201\001\n\030getDatanod" + "eStorageReport\0221.hadoop.hdfs.GetDatanode", "StorageReportRequestProto\0322.hadoop.hdfs." + "GetDatanodeStorageReportResponseProto\022x\n" + "\025getPreferredBlockSize\022..hadoop.hdfs.Get" + "PreferredBlockSizeRequestProto\032/.hadoop." + "hdfs.GetPreferredBlockSizeResponseProto\022" + "Z\n\013setSafeMode\022$.hadoop.hdfs.SetSafeMode" + "RequestProto\032%.hadoop.hdfs.SetSafeModeRe" + "sponseProto\022`\n\rsaveNamespace\022&.hadoop.hd" + "fs.SaveNamespaceRequestProto\032\'.hadoop.hd" + "fs.SaveNamespaceResponseProto\022T\n\trollEdi", "ts\022\".hadoop.hdfs.RollEditsRequestProto\032#" + ".hadoop.hdfs.RollEditsResponseProto\022u\n\024r" + "estoreFailedStorage\022-.hadoop.hdfs.Restor" + "eFailedStorageRequestProto\032..hadoop.hdfs" + ".RestoreFailedStorageResponseProto\022]\n\014re" + "freshNodes\022%.hadoop.hdfs.RefreshNodesReq" + "uestProto\032&.hadoop.hdfs.RefreshNodesResp" + "onseProto\022f\n\017finalizeUpgrade\022(.hadoop.hd" + "fs.FinalizeUpgradeRequestProto\032).hadoop." + "hdfs.FinalizeUpgradeResponseProto\022`\n\rupg", "radeStatus\022&.hadoop.hdfs.UpgradeStatusRe" + "questProto\032\'.hadoop.hdfs.UpgradeStatusRe" + "sponseProto\022c\n\016rollingUpgrade\022\'.hadoop.h" + "dfs.RollingUpgradeRequestProto\032(.hadoop." + "hdfs.RollingUpgradeResponseProto\022x\n\025list" + "CorruptFileBlocks\022..hadoop.hdfs.ListCorr" + "uptFileBlocksRequestProto\032/.hadoop.hdfs." 
+ "ListCorruptFileBlocksResponseProto\022Q\n\010me" + "taSave\022!.hadoop.hdfs.MetaSaveRequestProt" + "o\032\".hadoop.hdfs.MetaSaveResponseProto\022Z\n", "\013getFileInfo\022$.hadoop.hdfs.GetFileInfoRe" + "questProto\032%.hadoop.hdfs.GetFileInfoResp" + "onseProto\022o\n\022getLocatedFileInfo\022+.hadoop" + ".hdfs.GetLocatedFileInfoRequestProto\032,.h" + "adoop.hdfs.GetLocatedFileInfoResponsePro" + "to\022l\n\021addCacheDirective\022*.hadoop.hdfs.Ad" + "dCacheDirectiveRequestProto\032+.hadoop.hdf" + "s.AddCacheDirectiveResponseProto\022u\n\024modi" + "fyCacheDirective\022-.hadoop.hdfs.ModifyCac" + "heDirectiveRequestProto\032..hadoop.hdfs.Mo", "difyCacheDirectiveResponseProto\022u\n\024remov" + "eCacheDirective\022-.hadoop.hdfs.RemoveCach" + "eDirectiveRequestProto\032..hadoop.hdfs.Rem" + "oveCacheDirectiveResponseProto\022r\n\023listCa" + "cheDirectives\022,.hadoop.hdfs.ListCacheDir" + "ectivesRequestProto\032-.hadoop.hdfs.ListCa" + "cheDirectivesResponseProto\022]\n\014addCachePo" + "ol\022%.hadoop.hdfs.AddCachePoolRequestProt" + "o\032&.hadoop.hdfs.AddCachePoolResponseProt" + "o\022f\n\017modifyCachePool\022(.hadoop.hdfs.Modif", "yCachePoolRequestProto\032).hadoop.hdfs.Mod" + "ifyCachePoolResponseProto\022f\n\017removeCache" + "Pool\022(.hadoop.hdfs.RemoveCachePoolReques" + "tProto\032).hadoop.hdfs.RemoveCachePoolResp" + "onseProto\022c\n\016listCachePools\022\'.hadoop.hdf" + "s.ListCachePoolsRequestProto\032(.hadoop.hd" + "fs.ListCachePoolsResponseProto\022f\n\017getFil" + "eLinkInfo\022(.hadoop.hdfs.GetFileLinkInfoR" + "equestProto\032).hadoop.hdfs.GetFileLinkInf" + "oResponseProto\022l\n\021getContentSummary\022*.ha", "doop.hdfs.GetContentSummaryRequestProto\032" + "+.hadoop.hdfs.GetContentSummaryResponseP" + "roto\022Q\n\010setQuota\022!.hadoop.hdfs.SetQuotaR" + "equestProto\032\".hadoop.hdfs.SetQuotaRespon" + "seProto\022H\n\005fsync\022\036.hadoop.hdfs.FsyncRequ" + "estProto\032\037.hadoop.hdfs.FsyncResponseProt" + "o\022Q\n\010setTimes\022!.hadoop.hdfs.SetTimesRequ" + "estProto\032\".hadoop.hdfs.SetTimesResponseP" + "roto\022`\n\rcreateSymlink\022&.hadoop.hdfs.Crea" + "teSymlinkRequestProto\032\'.hadoop.hdfs.Crea", "teSymlinkResponseProto\022`\n\rgetLinkTarget\022" + "&.hadoop.hdfs.GetLinkTargetRequestProto\032" + "\'.hadoop.hdfs.GetLinkTargetResponseProto" + "\022{\n\026updateBlockForPipeline\022/.hadoop.hdfs" + ".UpdateBlockForPipelineRequestProto\0320.ha" + "doop.hdfs.UpdateBlockForPipelineResponse" + "Proto\022c\n\016updatePipeline\022\'.hadoop.hdfs.Up" + "datePipelineRequestProto\032(.hadoop.hdfs.U" + "pdatePipelineResponseProto\022s\n\022getDelegat" + "ionToken\022-.hadoop.common.GetDelegationTo", "kenRequestProto\032..hadoop.common.GetDeleg" + "ationTokenResponseProto\022y\n\024renewDelegati" + "onToken\022/.hadoop.common.RenewDelegationT" + "okenRequestProto\0320.hadoop.common.RenewDe" + "legationTokenResponseProto\022|\n\025cancelDele" + "gationToken\0220.hadoop.common.CancelDelega" + "tionTokenRequestProto\0321.hadoop.common.Ca" + "ncelDelegationTokenResponseProto\022u\n\024setB" + "alancerBandwidth\022-.hadoop.hdfs.SetBalanc" + "erBandwidthRequestProto\032..hadoop.hdfs.Se", "tBalancerBandwidthResponseProto\022u\n\024getDa" + "taEncryptionKey\022-.hadoop.hdfs.GetDataEnc" + "ryptionKeyRequestProto\032..hadoop.hdfs.Get" + "DataEncryptionKeyResponseProto\022c\n\016create" + "Snapshot\022\'.hadoop.hdfs.CreateSnapshotReq" + "uestProto\032(.hadoop.hdfs.CreateSnapshotRe" + "sponseProto\022c\n\016renameSnapshot\022\'.hadoop.h" + "dfs.RenameSnapshotRequestProto\032(.hadoop." 
+ "hdfs.RenameSnapshotResponseProto\022`\n\rallo" + "wSnapshot\022&.hadoop.hdfs.AllowSnapshotReq", "uestProto\032\'.hadoop.hdfs.AllowSnapshotRes" + "ponseProto\022i\n\020disallowSnapshot\022).hadoop." + "hdfs.DisallowSnapshotRequestProto\032*.hado" + "op.hdfs.DisallowSnapshotResponseProto\022\207\001" + "\n\032getSnapshottableDirListing\0223.hadoop.hd" + "fs.GetSnapshottableDirListingRequestProt" + "o\0324.hadoop.hdfs.GetSnapshottableDirListi" + "ngResponseProto\022c\n\016deleteSnapshot\022\'.hado" + "op.hdfs.DeleteSnapshotRequestProto\032(.had" + "oop.hdfs.DeleteSnapshotResponseProto\022x\n\025", "getSnapshotDiffReport\022..hadoop.hdfs.GetS" + "napshotDiffReportRequestProto\032/.hadoop.h" + "dfs.GetSnapshotDiffReportResponseProto\022\215" + "\001\n\034getSnapshotDiffReportListing\0225.hadoop" + ".hdfs.GetSnapshotDiffReportListingReques" + "tProto\0326.hadoop.hdfs.GetSnapshotDiffRepo" + "rtListingResponseProto\022]\n\014isFileClosed\022%" + ".hadoop.hdfs.IsFileClosedRequestProto\032&." + "hadoop.hdfs.IsFileClosedResponseProto\022i\n" + "\020modifyAclEntries\022).hadoop.hdfs.ModifyAc", "lEntriesRequestProto\032*.hadoop.hdfs.Modif" + "yAclEntriesResponseProto\022i\n\020removeAclEnt" + "ries\022).hadoop.hdfs.RemoveAclEntriesReque" + "stProto\032*.hadoop.hdfs.RemoveAclEntriesRe" + "sponseProto\022i\n\020removeDefaultAcl\022).hadoop" + ".hdfs.RemoveDefaultAclRequestProto\032*.had" + "oop.hdfs.RemoveDefaultAclResponseProto\022T" + "\n\tremoveAcl\022\".hadoop.hdfs.RemoveAclReque" + "stProto\032#.hadoop.hdfs.RemoveAclResponseP" + "roto\022K\n\006setAcl\022\037.hadoop.hdfs.SetAclReque", "stProto\032 .hadoop.hdfs.SetAclResponseProt" + "o\022]\n\014getAclStatus\022%.hadoop.hdfs.GetAclSt" + "atusRequestProto\032&.hadoop.hdfs.GetAclSta" + "tusResponseProto\022Q\n\010setXAttr\022!.hadoop.hd" + "fs.SetXAttrRequestProto\032\".hadoop.hdfs.Se" + "tXAttrResponseProto\022T\n\tgetXAttrs\022\".hadoo" + "p.hdfs.GetXAttrsRequestProto\032#.hadoop.hd" + "fs.GetXAttrsResponseProto\022W\n\nlistXAttrs\022" + "#.hadoop.hdfs.ListXAttrsRequestProto\032$.h" + "adoop.hdfs.ListXAttrsResponseProto\022Z\n\013re", "moveXAttr\022$.hadoop.hdfs.RemoveXAttrReque" + "stProto\032%.hadoop.hdfs.RemoveXAttrRespons" + "eProto\022Z\n\013checkAccess\022$.hadoop.hdfs.Chec" + "kAccessRequestProto\032%.hadoop.hdfs.CheckA" + "ccessResponseProto\022u\n\024createEncryptionZo" + "ne\022-.hadoop.hdfs.CreateEncryptionZoneReq" + "uestProto\032..hadoop.hdfs.CreateEncryption" + "ZoneResponseProto\022r\n\023listEncryptionZones" + "\022,.hadoop.hdfs.ListEncryptionZonesReques" + "tProto\032-.hadoop.hdfs.ListEncryptionZones", "ResponseProto\022~\n\027reencryptEncryptionZone" + "\0220.hadoop.hdfs.ReencryptEncryptionZoneRe" + "questProto\0321.hadoop.hdfs.ReencryptEncryp" + "tionZoneResponseProto\022{\n\026listReencryptio" + "nStatus\022/.hadoop.hdfs.ListReencryptionSt" + "atusRequestProto\0320.hadoop.hdfs.ListReenc" + "ryptionStatusResponseProto\022]\n\014getEZForPa" + "th\022%.hadoop.hdfs.GetEZForPathRequestProt" + "o\032&.hadoop.hdfs.GetEZForPathResponseProt" + "o\022{\n\026setErasureCodingPolicy\022/.hadoop.hdf", "s.SetErasureCodingPolicyRequestProto\0320.h" + "adoop.hdfs.SetErasureCodingPolicyRespons" + "eProto\022\201\001\n\030unsetErasureCodingPolicy\0221.ha" + "doop.hdfs.UnsetErasureCodingPolicyReques" + "tProto\0322.hadoop.hdfs.UnsetErasureCodingP" + "olicyResponseProto\022x\n\025getCurrentEditLogT" + "xid\022..hadoop.hdfs.GetCurrentEditLogTxidR" + "equestProto\032/.hadoop.hdfs.GetCurrentEdit" + "LogTxidResponseProto\022i\n\020getEditsFromTxid" + 
"\022).hadoop.hdfs.GetEditsFromTxidRequestPr", "oto\032*.hadoop.hdfs.GetEditsFromTxidRespon" + "seProto\022\201\001\n\030getErasureCodingPolicies\0221.h" + "adoop.hdfs.GetErasureCodingPoliciesReque" + "stProto\0322.hadoop.hdfs.GetErasureCodingPo" + "liciesResponseProto\022\201\001\n\030addErasureCoding" + "Policies\0221.hadoop.hdfs.AddErasureCodingP" + "oliciesRequestProto\0322.hadoop.hdfs.AddEra" + "sureCodingPoliciesResponseProto\022\204\001\n\031remo" + "veErasureCodingPolicy\0222.hadoop.hdfs.Remo" + "veErasureCodingPolicyRequestProto\0323.hado", "op.hdfs.RemoveErasureCodingPolicyRespons" + "eProto\022\204\001\n\031enableErasureCodingPolicy\0222.h" + "adoop.hdfs.EnableErasureCodingPolicyRequ" + "estProto\0323.hadoop.hdfs.EnableErasureCodi" + "ngPolicyResponseProto\022\207\001\n\032disableErasure" + "CodingPolicy\0223.hadoop.hdfs.DisableErasur" + "eCodingPolicyRequestProto\0324.hadoop.hdfs." + "DisableErasureCodingPolicyResponseProto\022" + "{\n\026getErasureCodingPolicy\022/.hadoop.hdfs." + "GetErasureCodingPolicyRequestProto\0320.had", "oop.hdfs.GetErasureCodingPolicyResponseP" + "roto\022{\n\026getErasureCodingCodecs\022/.hadoop." + "hdfs.GetErasureCodingCodecsRequestProto\032" + "0.hadoop.hdfs.GetErasureCodingCodecsResp" + "onseProto\022`\n\rgetQuotaUsage\022&.hadoop.hdfs" + ".GetQuotaUsageRequestProto\032\'.hadoop.hdfs" + ".GetQuotaUsageResponseProto\022`\n\rlistOpenF" + "iles\022&.hadoop.hdfs.ListOpenFilesRequestP" + "roto\032\'.hadoop.hdfs.ListOpenFilesResponse" + "Proto\022u\n\024satisfyStoragePolicy\022-.hadoop.h", "dfs.SatisfyStoragePolicyRequestProto\032..h" + "adoop.hdfs.SatisfyStoragePolicyResponseP" + "rotoBK\n%org.apache.hadoop.hdfs.protocol." + "protoB\034ClientNamenodeProtocolProtos\210\001\001\240\001" + "\001" }; io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { public io.prestosql.hadoop.$internal.com.google.protobuf.ExtensionRegistry assignDescriptors( io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FileDescriptor root) { descriptor = root; internal_static_hadoop_hdfs_GetBlockLocationsRequestProto_descriptor = getDescriptor().getMessageTypes().get(0); internal_static_hadoop_hdfs_GetBlockLocationsRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_GetBlockLocationsRequestProto_descriptor, new java.lang.String[] { "Src", "Offset", "Length", }); internal_static_hadoop_hdfs_GetBlockLocationsResponseProto_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_hadoop_hdfs_GetBlockLocationsResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_GetBlockLocationsResponseProto_descriptor, new java.lang.String[] { "Locations", }); internal_static_hadoop_hdfs_GetServerDefaultsRequestProto_descriptor = getDescriptor().getMessageTypes().get(2); internal_static_hadoop_hdfs_GetServerDefaultsRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_GetServerDefaultsRequestProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_GetServerDefaultsResponseProto_descriptor = getDescriptor().getMessageTypes().get(3); 
internal_static_hadoop_hdfs_GetServerDefaultsResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_GetServerDefaultsResponseProto_descriptor, new java.lang.String[] { "ServerDefaults", }); internal_static_hadoop_hdfs_CreateRequestProto_descriptor = getDescriptor().getMessageTypes().get(4); internal_static_hadoop_hdfs_CreateRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_CreateRequestProto_descriptor, new java.lang.String[] { "Src", "Masked", "ClientName", "CreateFlag", "CreateParent", "Replication", "BlockSize", "CryptoProtocolVersion", "Unmasked", "EcPolicyName", }); internal_static_hadoop_hdfs_CreateResponseProto_descriptor = getDescriptor().getMessageTypes().get(5); internal_static_hadoop_hdfs_CreateResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_CreateResponseProto_descriptor, new java.lang.String[] { "Fs", }); internal_static_hadoop_hdfs_AppendRequestProto_descriptor = getDescriptor().getMessageTypes().get(6); internal_static_hadoop_hdfs_AppendRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_AppendRequestProto_descriptor, new java.lang.String[] { "Src", "ClientName", "Flag", }); internal_static_hadoop_hdfs_AppendResponseProto_descriptor = getDescriptor().getMessageTypes().get(7); internal_static_hadoop_hdfs_AppendResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_AppendResponseProto_descriptor, new java.lang.String[] { "Block", "Stat", }); internal_static_hadoop_hdfs_SetReplicationRequestProto_descriptor = getDescriptor().getMessageTypes().get(8); internal_static_hadoop_hdfs_SetReplicationRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_SetReplicationRequestProto_descriptor, new java.lang.String[] { "Src", "Replication", }); internal_static_hadoop_hdfs_SetReplicationResponseProto_descriptor = getDescriptor().getMessageTypes().get(9); internal_static_hadoop_hdfs_SetReplicationResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_SetReplicationResponseProto_descriptor, new java.lang.String[] { "Result", }); internal_static_hadoop_hdfs_SetStoragePolicyRequestProto_descriptor = getDescriptor().getMessageTypes().get(10); internal_static_hadoop_hdfs_SetStoragePolicyRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_SetStoragePolicyRequestProto_descriptor, new java.lang.String[] { "Src", "PolicyName", }); internal_static_hadoop_hdfs_SetStoragePolicyResponseProto_descriptor = getDescriptor().getMessageTypes().get(11); internal_static_hadoop_hdfs_SetStoragePolicyResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_SetStoragePolicyResponseProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_UnsetStoragePolicyRequestProto_descriptor = 
getDescriptor().getMessageTypes().get(12); internal_static_hadoop_hdfs_UnsetStoragePolicyRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_UnsetStoragePolicyRequestProto_descriptor, new java.lang.String[] { "Src", }); internal_static_hadoop_hdfs_UnsetStoragePolicyResponseProto_descriptor = getDescriptor().getMessageTypes().get(13); internal_static_hadoop_hdfs_UnsetStoragePolicyResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_UnsetStoragePolicyResponseProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_GetStoragePolicyRequestProto_descriptor = getDescriptor().getMessageTypes().get(14); internal_static_hadoop_hdfs_GetStoragePolicyRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_GetStoragePolicyRequestProto_descriptor, new java.lang.String[] { "Path", }); internal_static_hadoop_hdfs_GetStoragePolicyResponseProto_descriptor = getDescriptor().getMessageTypes().get(15); internal_static_hadoop_hdfs_GetStoragePolicyResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_GetStoragePolicyResponseProto_descriptor, new java.lang.String[] { "StoragePolicy", }); internal_static_hadoop_hdfs_GetStoragePoliciesRequestProto_descriptor = getDescriptor().getMessageTypes().get(16); internal_static_hadoop_hdfs_GetStoragePoliciesRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_GetStoragePoliciesRequestProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_GetStoragePoliciesResponseProto_descriptor = getDescriptor().getMessageTypes().get(17); internal_static_hadoop_hdfs_GetStoragePoliciesResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_GetStoragePoliciesResponseProto_descriptor, new java.lang.String[] { "Policies", }); internal_static_hadoop_hdfs_SetPermissionRequestProto_descriptor = getDescriptor().getMessageTypes().get(18); internal_static_hadoop_hdfs_SetPermissionRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_SetPermissionRequestProto_descriptor, new java.lang.String[] { "Src", "Permission", }); internal_static_hadoop_hdfs_SetPermissionResponseProto_descriptor = getDescriptor().getMessageTypes().get(19); internal_static_hadoop_hdfs_SetPermissionResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_SetPermissionResponseProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_SetOwnerRequestProto_descriptor = getDescriptor().getMessageTypes().get(20); internal_static_hadoop_hdfs_SetOwnerRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_SetOwnerRequestProto_descriptor, new java.lang.String[] { "Src", "Username", "Groupname", }); internal_static_hadoop_hdfs_SetOwnerResponseProto_descriptor = getDescriptor().getMessageTypes().get(21); 
internal_static_hadoop_hdfs_SetOwnerResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_SetOwnerResponseProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_AbandonBlockRequestProto_descriptor = getDescriptor().getMessageTypes().get(22); internal_static_hadoop_hdfs_AbandonBlockRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_AbandonBlockRequestProto_descriptor, new java.lang.String[] { "B", "Src", "Holder", "FileId", }); internal_static_hadoop_hdfs_AbandonBlockResponseProto_descriptor = getDescriptor().getMessageTypes().get(23); internal_static_hadoop_hdfs_AbandonBlockResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_AbandonBlockResponseProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_AddBlockRequestProto_descriptor = getDescriptor().getMessageTypes().get(24); internal_static_hadoop_hdfs_AddBlockRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_AddBlockRequestProto_descriptor, new java.lang.String[] { "Src", "ClientName", "Previous", "ExcludeNodes", "FileId", "FavoredNodes", "Flags", }); internal_static_hadoop_hdfs_AddBlockResponseProto_descriptor = getDescriptor().getMessageTypes().get(25); internal_static_hadoop_hdfs_AddBlockResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_AddBlockResponseProto_descriptor, new java.lang.String[] { "Block", }); internal_static_hadoop_hdfs_GetAdditionalDatanodeRequestProto_descriptor = getDescriptor().getMessageTypes().get(26); internal_static_hadoop_hdfs_GetAdditionalDatanodeRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_GetAdditionalDatanodeRequestProto_descriptor, new java.lang.String[] { "Src", "Blk", "Existings", "Excludes", "NumAdditionalNodes", "ClientName", "ExistingStorageUuids", "FileId", }); internal_static_hadoop_hdfs_GetAdditionalDatanodeResponseProto_descriptor = getDescriptor().getMessageTypes().get(27); internal_static_hadoop_hdfs_GetAdditionalDatanodeResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_GetAdditionalDatanodeResponseProto_descriptor, new java.lang.String[] { "Block", }); internal_static_hadoop_hdfs_CompleteRequestProto_descriptor = getDescriptor().getMessageTypes().get(28); internal_static_hadoop_hdfs_CompleteRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_CompleteRequestProto_descriptor, new java.lang.String[] { "Src", "ClientName", "Last", "FileId", }); internal_static_hadoop_hdfs_CompleteResponseProto_descriptor = getDescriptor().getMessageTypes().get(29); internal_static_hadoop_hdfs_CompleteResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_CompleteResponseProto_descriptor, new java.lang.String[] { "Result", }); 
internal_static_hadoop_hdfs_ReportBadBlocksRequestProto_descriptor = getDescriptor().getMessageTypes().get(30); internal_static_hadoop_hdfs_ReportBadBlocksRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_ReportBadBlocksRequestProto_descriptor, new java.lang.String[] { "Blocks", }); internal_static_hadoop_hdfs_ReportBadBlocksResponseProto_descriptor = getDescriptor().getMessageTypes().get(31); internal_static_hadoop_hdfs_ReportBadBlocksResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_ReportBadBlocksResponseProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_ConcatRequestProto_descriptor = getDescriptor().getMessageTypes().get(32); internal_static_hadoop_hdfs_ConcatRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_ConcatRequestProto_descriptor, new java.lang.String[] { "Trg", "Srcs", }); internal_static_hadoop_hdfs_ConcatResponseProto_descriptor = getDescriptor().getMessageTypes().get(33); internal_static_hadoop_hdfs_ConcatResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_ConcatResponseProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_TruncateRequestProto_descriptor = getDescriptor().getMessageTypes().get(34); internal_static_hadoop_hdfs_TruncateRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_TruncateRequestProto_descriptor, new java.lang.String[] { "Src", "NewLength", "ClientName", }); internal_static_hadoop_hdfs_TruncateResponseProto_descriptor = getDescriptor().getMessageTypes().get(35); internal_static_hadoop_hdfs_TruncateResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_TruncateResponseProto_descriptor, new java.lang.String[] { "Result", }); internal_static_hadoop_hdfs_RenameRequestProto_descriptor = getDescriptor().getMessageTypes().get(36); internal_static_hadoop_hdfs_RenameRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_RenameRequestProto_descriptor, new java.lang.String[] { "Src", "Dst", }); internal_static_hadoop_hdfs_RenameResponseProto_descriptor = getDescriptor().getMessageTypes().get(37); internal_static_hadoop_hdfs_RenameResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_RenameResponseProto_descriptor, new java.lang.String[] { "Result", }); internal_static_hadoop_hdfs_Rename2RequestProto_descriptor = getDescriptor().getMessageTypes().get(38); internal_static_hadoop_hdfs_Rename2RequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_Rename2RequestProto_descriptor, new java.lang.String[] { "Src", "Dst", "OverwriteDest", "MoveToTrash", }); internal_static_hadoop_hdfs_Rename2ResponseProto_descriptor = getDescriptor().getMessageTypes().get(39); internal_static_hadoop_hdfs_Rename2ResponseProto_fieldAccessorTable = new 
io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_Rename2ResponseProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_DeleteRequestProto_descriptor = getDescriptor().getMessageTypes().get(40); internal_static_hadoop_hdfs_DeleteRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_DeleteRequestProto_descriptor, new java.lang.String[] { "Src", "Recursive", }); internal_static_hadoop_hdfs_DeleteResponseProto_descriptor = getDescriptor().getMessageTypes().get(41); internal_static_hadoop_hdfs_DeleteResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_DeleteResponseProto_descriptor, new java.lang.String[] { "Result", }); internal_static_hadoop_hdfs_MkdirsRequestProto_descriptor = getDescriptor().getMessageTypes().get(42); internal_static_hadoop_hdfs_MkdirsRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_MkdirsRequestProto_descriptor, new java.lang.String[] { "Src", "Masked", "CreateParent", "Unmasked", }); internal_static_hadoop_hdfs_MkdirsResponseProto_descriptor = getDescriptor().getMessageTypes().get(43); internal_static_hadoop_hdfs_MkdirsResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_MkdirsResponseProto_descriptor, new java.lang.String[] { "Result", }); internal_static_hadoop_hdfs_GetListingRequestProto_descriptor = getDescriptor().getMessageTypes().get(44); internal_static_hadoop_hdfs_GetListingRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_GetListingRequestProto_descriptor, new java.lang.String[] { "Src", "StartAfter", "NeedLocation", }); internal_static_hadoop_hdfs_GetListingResponseProto_descriptor = getDescriptor().getMessageTypes().get(45); internal_static_hadoop_hdfs_GetListingResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_GetListingResponseProto_descriptor, new java.lang.String[] { "DirList", }); internal_static_hadoop_hdfs_GetSnapshottableDirListingRequestProto_descriptor = getDescriptor().getMessageTypes().get(46); internal_static_hadoop_hdfs_GetSnapshottableDirListingRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_GetSnapshottableDirListingRequestProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_GetSnapshottableDirListingResponseProto_descriptor = getDescriptor().getMessageTypes().get(47); internal_static_hadoop_hdfs_GetSnapshottableDirListingResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_GetSnapshottableDirListingResponseProto_descriptor, new java.lang.String[] { "SnapshottableDirList", }); internal_static_hadoop_hdfs_GetSnapshotDiffReportRequestProto_descriptor = getDescriptor().getMessageTypes().get(48); internal_static_hadoop_hdfs_GetSnapshotDiffReportRequestProto_fieldAccessorTable = new 
internal_static_hadoop_hdfs_GetSnapshotDiffReportResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(49);
internal_static_hadoop_hdfs_GetSnapshotDiffReportResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetSnapshotDiffReportResponseProto_descriptor,
    new java.lang.String[] { "DiffReport", });
internal_static_hadoop_hdfs_GetSnapshotDiffReportListingRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(50);
internal_static_hadoop_hdfs_GetSnapshotDiffReportListingRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetSnapshotDiffReportListingRequestProto_descriptor,
    new java.lang.String[] { "SnapshotRoot", "FromSnapshot", "ToSnapshot", "Cursor", });
internal_static_hadoop_hdfs_GetSnapshotDiffReportListingResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(51);
internal_static_hadoop_hdfs_GetSnapshotDiffReportListingResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetSnapshotDiffReportListingResponseProto_descriptor,
    new java.lang.String[] { "DiffReport", });
internal_static_hadoop_hdfs_RenewLeaseRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(52);
internal_static_hadoop_hdfs_RenewLeaseRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_RenewLeaseRequestProto_descriptor,
    new java.lang.String[] { "ClientName", });
internal_static_hadoop_hdfs_RenewLeaseResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(53);
internal_static_hadoop_hdfs_RenewLeaseResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_RenewLeaseResponseProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_RecoverLeaseRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(54);
internal_static_hadoop_hdfs_RecoverLeaseRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_RecoverLeaseRequestProto_descriptor,
    new java.lang.String[] { "Src", "ClientName", });
internal_static_hadoop_hdfs_RecoverLeaseResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(55);
internal_static_hadoop_hdfs_RecoverLeaseResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_RecoverLeaseResponseProto_descriptor,
    new java.lang.String[] { "Result", });
internal_static_hadoop_hdfs_GetFsStatusRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(56);
internal_static_hadoop_hdfs_GetFsStatusRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetFsStatusRequestProto_descriptor,
    new java.lang.String[] { });
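// Editor's note (illustrative, not protoc output): lease recovery is the
// request/response pair wired just above. A sketch, with a hypothetical
// client name and reply buffer:
//
//   RecoverLeaseRequestProto req = RecoverLeaseRequestProto.newBuilder()
//       .setSrc("/user/alice/open-file.log")
//       .setClientName("DFSClient_example_1")
//       .build();
//   // ...after the RPC round trip:
//   boolean closed = RecoverLeaseResponseProto.parseFrom(replyBytes).getResult();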
internal_static_hadoop_hdfs_GetFsStatsResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(57);
internal_static_hadoop_hdfs_GetFsStatsResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetFsStatsResponseProto_descriptor,
    new java.lang.String[] { "Capacity", "Used", "Remaining", "UnderReplicated", "CorruptBlocks", "MissingBlocks", "MissingReplOneBlocks", "BlocksInFuture", "PendingDeletionBlocks", });
internal_static_hadoop_hdfs_GetFsReplicatedBlockStatsRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(58);
internal_static_hadoop_hdfs_GetFsReplicatedBlockStatsRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetFsReplicatedBlockStatsRequestProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_GetFsReplicatedBlockStatsResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(59);
internal_static_hadoop_hdfs_GetFsReplicatedBlockStatsResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetFsReplicatedBlockStatsResponseProto_descriptor,
    new java.lang.String[] { "LowRedundancy", "CorruptBlocks", "MissingBlocks", "MissingReplOneBlocks", "BlocksInFuture", "PendingDeletionBlocks", "HighestPrioLowRedundancyBlocks", });
internal_static_hadoop_hdfs_GetFsECBlockGroupStatsRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(60);
internal_static_hadoop_hdfs_GetFsECBlockGroupStatsRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetFsECBlockGroupStatsRequestProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_GetFsECBlockGroupStatsResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(61);
internal_static_hadoop_hdfs_GetFsECBlockGroupStatsResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetFsECBlockGroupStatsResponseProto_descriptor,
    new java.lang.String[] { "LowRedundancy", "CorruptBlocks", "MissingBlocks", "BlocksInFuture", "PendingDeletionBlocks", "HighestPrioLowRedundancyBlocks", });
internal_static_hadoop_hdfs_GetDatanodeReportRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(62);
internal_static_hadoop_hdfs_GetDatanodeReportRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetDatanodeReportRequestProto_descriptor,
    new java.lang.String[] { "Type", });
internal_static_hadoop_hdfs_GetDatanodeReportResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(63);
internal_static_hadoop_hdfs_GetDatanodeReportResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetDatanodeReportResponseProto_descriptor,
    new java.lang.String[] { "Di", });
internal_static_hadoop_hdfs_GetDatanodeStorageReportRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(64);
internal_static_hadoop_hdfs_GetDatanodeStorageReportRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetDatanodeStorageReportRequestProto_descriptor,
    new java.lang.String[] { "Type", });
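// Editor's note (illustrative, not protoc output): the file-system
// statistics response wired above is typically consumed through the
// generated getters, whose names follow the field list registered here:
//
//   GetFsStatsResponseProto stats = GetFsStatsResponseProto.parseFrom(bytes);
//   long remaining = stats.getRemaining();       // capacity still free
//   long missing   = stats.getMissingBlocks();   // blocks with no live replica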
internal_static_hadoop_hdfs_DatanodeStorageReportProto_descriptor =
  getDescriptor().getMessageTypes().get(65);
internal_static_hadoop_hdfs_DatanodeStorageReportProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_DatanodeStorageReportProto_descriptor,
    new java.lang.String[] { "DatanodeInfo", "StorageReports", });
internal_static_hadoop_hdfs_GetDatanodeStorageReportResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(66);
internal_static_hadoop_hdfs_GetDatanodeStorageReportResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetDatanodeStorageReportResponseProto_descriptor,
    new java.lang.String[] { "DatanodeStorageReports", });
internal_static_hadoop_hdfs_GetPreferredBlockSizeRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(67);
internal_static_hadoop_hdfs_GetPreferredBlockSizeRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetPreferredBlockSizeRequestProto_descriptor,
    new java.lang.String[] { "Filename", });
internal_static_hadoop_hdfs_GetPreferredBlockSizeResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(68);
internal_static_hadoop_hdfs_GetPreferredBlockSizeResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetPreferredBlockSizeResponseProto_descriptor,
    new java.lang.String[] { "Bsize", });
internal_static_hadoop_hdfs_SetSafeModeRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(69);
internal_static_hadoop_hdfs_SetSafeModeRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_SetSafeModeRequestProto_descriptor,
    new java.lang.String[] { "Action", "Checked", });
internal_static_hadoop_hdfs_SetSafeModeResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(70);
internal_static_hadoop_hdfs_SetSafeModeResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_SetSafeModeResponseProto_descriptor,
    new java.lang.String[] { "Result", });
internal_static_hadoop_hdfs_SaveNamespaceRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(71);
internal_static_hadoop_hdfs_SaveNamespaceRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_SaveNamespaceRequestProto_descriptor,
    new java.lang.String[] { "TimeWindow", "TxGap", });
internal_static_hadoop_hdfs_SaveNamespaceResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(72);
internal_static_hadoop_hdfs_SaveNamespaceResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_SaveNamespaceResponseProto_descriptor,
    new java.lang.String[] { "Saved", });
internal_static_hadoop_hdfs_RollEditsRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(73);
internal_static_hadoop_hdfs_RollEditsRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_RollEditsRequestProto_descriptor,
    new java.lang.String[] { });
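// Editor's note (illustrative, not protoc output): a saveNamespace request
// built through the generated API. The two thresholds are hypothetical
// values; their meaning is inferred from the field names registered above:
//
//   SaveNamespaceRequestProto req = SaveNamespaceRequestProto.newBuilder()
//       .setTimeWindow(3600)   // only checkpoint if the last one is older than this
//       .setTxGap(100000)      // ...and the edit log has grown by this many txns
//       .build();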
internal_static_hadoop_hdfs_RollEditsResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(74);
internal_static_hadoop_hdfs_RollEditsResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_RollEditsResponseProto_descriptor,
    new java.lang.String[] { "NewSegmentTxId", });
internal_static_hadoop_hdfs_RestoreFailedStorageRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(75);
internal_static_hadoop_hdfs_RestoreFailedStorageRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_RestoreFailedStorageRequestProto_descriptor,
    new java.lang.String[] { "Arg", });
internal_static_hadoop_hdfs_RestoreFailedStorageResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(76);
internal_static_hadoop_hdfs_RestoreFailedStorageResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_RestoreFailedStorageResponseProto_descriptor,
    new java.lang.String[] { "Result", });
internal_static_hadoop_hdfs_RefreshNodesRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(77);
internal_static_hadoop_hdfs_RefreshNodesRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_RefreshNodesRequestProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_RefreshNodesResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(78);
internal_static_hadoop_hdfs_RefreshNodesResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_RefreshNodesResponseProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_FinalizeUpgradeRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(79);
internal_static_hadoop_hdfs_FinalizeUpgradeRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_FinalizeUpgradeRequestProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_FinalizeUpgradeResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(80);
internal_static_hadoop_hdfs_FinalizeUpgradeResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_FinalizeUpgradeResponseProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_UpgradeStatusRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(81);
internal_static_hadoop_hdfs_UpgradeStatusRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_UpgradeStatusRequestProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_UpgradeStatusResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(82);
internal_static_hadoop_hdfs_UpgradeStatusResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_UpgradeStatusResponseProto_descriptor,
    new java.lang.String[] { "UpgradeFinalized", });
internal_static_hadoop_hdfs_RollingUpgradeRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(83);
internal_static_hadoop_hdfs_RollingUpgradeRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_RollingUpgradeRequestProto_descriptor,
    new java.lang.String[] { "Action", });
internal_static_hadoop_hdfs_RollingUpgradeInfoProto_descriptor =
  getDescriptor().getMessageTypes().get(84);
internal_static_hadoop_hdfs_RollingUpgradeInfoProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_RollingUpgradeInfoProto_descriptor,
    new java.lang.String[] { "Status", "StartTime", "FinalizeTime", "CreatedRollbackImages", });
internal_static_hadoop_hdfs_RollingUpgradeResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(85);
internal_static_hadoop_hdfs_RollingUpgradeResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_RollingUpgradeResponseProto_descriptor,
    new java.lang.String[] { "RollingUpgradeInfo", });
internal_static_hadoop_hdfs_ListCorruptFileBlocksRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(86);
internal_static_hadoop_hdfs_ListCorruptFileBlocksRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_ListCorruptFileBlocksRequestProto_descriptor,
    new java.lang.String[] { "Path", "Cookie", });
internal_static_hadoop_hdfs_ListCorruptFileBlocksResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(87);
internal_static_hadoop_hdfs_ListCorruptFileBlocksResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_ListCorruptFileBlocksResponseProto_descriptor,
    new java.lang.String[] { "Corrupt", });
internal_static_hadoop_hdfs_MetaSaveRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(88);
internal_static_hadoop_hdfs_MetaSaveRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_MetaSaveRequestProto_descriptor,
    new java.lang.String[] { "Filename", });
internal_static_hadoop_hdfs_MetaSaveResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(89);
internal_static_hadoop_hdfs_MetaSaveResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_MetaSaveResponseProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_GetFileInfoRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(90);
internal_static_hadoop_hdfs_GetFileInfoRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetFileInfoRequestProto_descriptor,
    new java.lang.String[] { "Src", });
internal_static_hadoop_hdfs_GetFileInfoResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(91);
internal_static_hadoop_hdfs_GetFileInfoResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetFileInfoResponseProto_descriptor,
    new java.lang.String[] { "Fs", });
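// Editor's note (illustrative, not protoc output): a getFileInfo exchange.
// The path is hypothetical; the response's "Fs" field is optional, so
// hasFs() distinguishes "no such path" from a populated file status:
//
//   GetFileInfoRequestProto req = GetFileInfoRequestProto.newBuilder()
//       .setSrc("/user/alice/data.txt")
//       .build();
//   // ...after the RPC round trip:
//   GetFileInfoResponseProto resp = GetFileInfoResponseProto.parseFrom(bytes);
//   if (resp.hasFs()) { /* inspect resp.getFs() */ }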
internal_static_hadoop_hdfs_GetLocatedFileInfoRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(92);
internal_static_hadoop_hdfs_GetLocatedFileInfoRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetLocatedFileInfoRequestProto_descriptor,
    new java.lang.String[] { "Src", "NeedBlockToken", });
internal_static_hadoop_hdfs_GetLocatedFileInfoResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(93);
internal_static_hadoop_hdfs_GetLocatedFileInfoResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetLocatedFileInfoResponseProto_descriptor,
    new java.lang.String[] { "Fs", });
internal_static_hadoop_hdfs_IsFileClosedRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(94);
internal_static_hadoop_hdfs_IsFileClosedRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_IsFileClosedRequestProto_descriptor,
    new java.lang.String[] { "Src", });
internal_static_hadoop_hdfs_IsFileClosedResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(95);
internal_static_hadoop_hdfs_IsFileClosedResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_IsFileClosedResponseProto_descriptor,
    new java.lang.String[] { "Result", });
internal_static_hadoop_hdfs_CacheDirectiveInfoProto_descriptor =
  getDescriptor().getMessageTypes().get(96);
internal_static_hadoop_hdfs_CacheDirectiveInfoProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_CacheDirectiveInfoProto_descriptor,
    new java.lang.String[] { "Id", "Path", "Replication", "Pool", "Expiration", });
internal_static_hadoop_hdfs_CacheDirectiveInfoExpirationProto_descriptor =
  getDescriptor().getMessageTypes().get(97);
internal_static_hadoop_hdfs_CacheDirectiveInfoExpirationProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_CacheDirectiveInfoExpirationProto_descriptor,
    new java.lang.String[] { "Millis", "IsRelative", });
internal_static_hadoop_hdfs_CacheDirectiveStatsProto_descriptor =
  getDescriptor().getMessageTypes().get(98);
internal_static_hadoop_hdfs_CacheDirectiveStatsProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_CacheDirectiveStatsProto_descriptor,
    new java.lang.String[] { "BytesNeeded", "BytesCached", "FilesNeeded", "FilesCached", "HasExpired", });
internal_static_hadoop_hdfs_AddCacheDirectiveRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(99);
internal_static_hadoop_hdfs_AddCacheDirectiveRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_AddCacheDirectiveRequestProto_descriptor,
    new java.lang.String[] { "Info", "CacheFlags", });
internal_static_hadoop_hdfs_AddCacheDirectiveResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(100);
internal_static_hadoop_hdfs_AddCacheDirectiveResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_AddCacheDirectiveResponseProto_descriptor,
    new java.lang.String[] { "Id", });
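// Editor's note (illustrative, not protoc output): a cache directive
// description assembled with the nested-builder pattern; the path, pool
// name and expiry are hypothetical:
//
//   CacheDirectiveInfoProto info = CacheDirectiveInfoProto.newBuilder()
//       .setPath("/user/alice/hot-table")
//       .setPool("analytics")
//       .setReplication(3)
//       .setExpiration(CacheDirectiveInfoExpirationProto.newBuilder()
//           .setMillis(86400000L)    // 24 hours...
//           .setIsRelative(true))    // ...relative to now
//       .build();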
internal_static_hadoop_hdfs_ModifyCacheDirectiveRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(101);
internal_static_hadoop_hdfs_ModifyCacheDirectiveRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_ModifyCacheDirectiveRequestProto_descriptor,
    new java.lang.String[] { "Info", "CacheFlags", });
internal_static_hadoop_hdfs_ModifyCacheDirectiveResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(102);
internal_static_hadoop_hdfs_ModifyCacheDirectiveResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_ModifyCacheDirectiveResponseProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_RemoveCacheDirectiveRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(103);
internal_static_hadoop_hdfs_RemoveCacheDirectiveRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_RemoveCacheDirectiveRequestProto_descriptor,
    new java.lang.String[] { "Id", });
internal_static_hadoop_hdfs_RemoveCacheDirectiveResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(104);
internal_static_hadoop_hdfs_RemoveCacheDirectiveResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_RemoveCacheDirectiveResponseProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_ListCacheDirectivesRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(105);
internal_static_hadoop_hdfs_ListCacheDirectivesRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_ListCacheDirectivesRequestProto_descriptor,
    new java.lang.String[] { "PrevId", "Filter", });
internal_static_hadoop_hdfs_CacheDirectiveEntryProto_descriptor =
  getDescriptor().getMessageTypes().get(106);
internal_static_hadoop_hdfs_CacheDirectiveEntryProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_CacheDirectiveEntryProto_descriptor,
    new java.lang.String[] { "Info", "Stats", });
internal_static_hadoop_hdfs_ListCacheDirectivesResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(107);
internal_static_hadoop_hdfs_ListCacheDirectivesResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_ListCacheDirectivesResponseProto_descriptor,
    new java.lang.String[] { "Elements", "HasMore", });
internal_static_hadoop_hdfs_CachePoolInfoProto_descriptor =
  getDescriptor().getMessageTypes().get(108);
internal_static_hadoop_hdfs_CachePoolInfoProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_CachePoolInfoProto_descriptor,
    new java.lang.String[] { "PoolName", "OwnerName", "GroupName", "Mode", "Limit", "MaxRelativeExpiry", "DefaultReplication", });
internal_static_hadoop_hdfs_CachePoolStatsProto_descriptor =
  getDescriptor().getMessageTypes().get(109);
internal_static_hadoop_hdfs_CachePoolStatsProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_CachePoolStatsProto_descriptor,
    new java.lang.String[] { "BytesNeeded", "BytesCached", "BytesOverlimit", "FilesNeeded", "FilesCached", });
internal_static_hadoop_hdfs_AddCachePoolRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(110);
internal_static_hadoop_hdfs_AddCachePoolRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_AddCachePoolRequestProto_descriptor,
    new java.lang.String[] { "Info", });
internal_static_hadoop_hdfs_AddCachePoolResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(111);
internal_static_hadoop_hdfs_AddCachePoolResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_AddCachePoolResponseProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_ModifyCachePoolRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(112);
internal_static_hadoop_hdfs_ModifyCachePoolRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_ModifyCachePoolRequestProto_descriptor,
    new java.lang.String[] { "Info", });
internal_static_hadoop_hdfs_ModifyCachePoolResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(113);
internal_static_hadoop_hdfs_ModifyCachePoolResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_ModifyCachePoolResponseProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_RemoveCachePoolRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(114);
internal_static_hadoop_hdfs_RemoveCachePoolRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_RemoveCachePoolRequestProto_descriptor,
    new java.lang.String[] { "PoolName", });
internal_static_hadoop_hdfs_RemoveCachePoolResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(115);
internal_static_hadoop_hdfs_RemoveCachePoolResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_RemoveCachePoolResponseProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_ListCachePoolsRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(116);
internal_static_hadoop_hdfs_ListCachePoolsRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_ListCachePoolsRequestProto_descriptor,
    new java.lang.String[] { "PrevPoolName", });
internal_static_hadoop_hdfs_ListCachePoolsResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(117);
internal_static_hadoop_hdfs_ListCachePoolsResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_ListCachePoolsResponseProto_descriptor,
    new java.lang.String[] { "Entries", "HasMore", });
internal_static_hadoop_hdfs_CachePoolEntryProto_descriptor =
  getDescriptor().getMessageTypes().get(118);
internal_static_hadoop_hdfs_CachePoolEntryProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_CachePoolEntryProto_descriptor,
    new java.lang.String[] { "Info", "Stats", });
internal_static_hadoop_hdfs_GetFileLinkInfoRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(119);
internal_static_hadoop_hdfs_GetFileLinkInfoRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetFileLinkInfoRequestProto_descriptor,
    new java.lang.String[] { "Src", });
internal_static_hadoop_hdfs_GetFileLinkInfoResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(120);
internal_static_hadoop_hdfs_GetFileLinkInfoResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetFileLinkInfoResponseProto_descriptor,
    new java.lang.String[] { "Fs", });
internal_static_hadoop_hdfs_GetContentSummaryRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(121);
internal_static_hadoop_hdfs_GetContentSummaryRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetContentSummaryRequestProto_descriptor,
    new java.lang.String[] { "Path", });
internal_static_hadoop_hdfs_GetContentSummaryResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(122);
internal_static_hadoop_hdfs_GetContentSummaryResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetContentSummaryResponseProto_descriptor,
    new java.lang.String[] { "Summary", });
internal_static_hadoop_hdfs_GetQuotaUsageRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(123);
internal_static_hadoop_hdfs_GetQuotaUsageRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetQuotaUsageRequestProto_descriptor,
    new java.lang.String[] { "Path", });
internal_static_hadoop_hdfs_GetQuotaUsageResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(124);
internal_static_hadoop_hdfs_GetQuotaUsageResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetQuotaUsageResponseProto_descriptor,
    new java.lang.String[] { "Usage", });
internal_static_hadoop_hdfs_SetQuotaRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(125);
internal_static_hadoop_hdfs_SetQuotaRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_SetQuotaRequestProto_descriptor,
    new java.lang.String[] { "Path", "NamespaceQuota", "StoragespaceQuota", "StorageType", });
internal_static_hadoop_hdfs_SetQuotaResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(126);
internal_static_hadoop_hdfs_SetQuotaResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_SetQuotaResponseProto_descriptor,
    new java.lang.String[] { });
"Src", "Client", "LastBlockLength", "FileId", }); internal_static_hadoop_hdfs_FsyncResponseProto_descriptor = getDescriptor().getMessageTypes().get(128); internal_static_hadoop_hdfs_FsyncResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_FsyncResponseProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_SetTimesRequestProto_descriptor = getDescriptor().getMessageTypes().get(129); internal_static_hadoop_hdfs_SetTimesRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_SetTimesRequestProto_descriptor, new java.lang.String[] { "Src", "Mtime", "Atime", }); internal_static_hadoop_hdfs_SetTimesResponseProto_descriptor = getDescriptor().getMessageTypes().get(130); internal_static_hadoop_hdfs_SetTimesResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_SetTimesResponseProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_CreateSymlinkRequestProto_descriptor = getDescriptor().getMessageTypes().get(131); internal_static_hadoop_hdfs_CreateSymlinkRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_CreateSymlinkRequestProto_descriptor, new java.lang.String[] { "Target", "Link", "DirPerm", "CreateParent", }); internal_static_hadoop_hdfs_CreateSymlinkResponseProto_descriptor = getDescriptor().getMessageTypes().get(132); internal_static_hadoop_hdfs_CreateSymlinkResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_CreateSymlinkResponseProto_descriptor, new java.lang.String[] { }); internal_static_hadoop_hdfs_GetLinkTargetRequestProto_descriptor = getDescriptor().getMessageTypes().get(133); internal_static_hadoop_hdfs_GetLinkTargetRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_GetLinkTargetRequestProto_descriptor, new java.lang.String[] { "Path", }); internal_static_hadoop_hdfs_GetLinkTargetResponseProto_descriptor = getDescriptor().getMessageTypes().get(134); internal_static_hadoop_hdfs_GetLinkTargetResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_GetLinkTargetResponseProto_descriptor, new java.lang.String[] { "TargetPath", }); internal_static_hadoop_hdfs_UpdateBlockForPipelineRequestProto_descriptor = getDescriptor().getMessageTypes().get(135); internal_static_hadoop_hdfs_UpdateBlockForPipelineRequestProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_UpdateBlockForPipelineRequestProto_descriptor, new java.lang.String[] { "Block", "ClientName", }); internal_static_hadoop_hdfs_UpdateBlockForPipelineResponseProto_descriptor = getDescriptor().getMessageTypes().get(136); internal_static_hadoop_hdfs_UpdateBlockForPipelineResponseProto_fieldAccessorTable = new io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hadoop_hdfs_UpdateBlockForPipelineResponseProto_descriptor, new java.lang.String[] { "Block", }); 
internal_static_hadoop_hdfs_UpdatePipelineRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(137);
internal_static_hadoop_hdfs_UpdatePipelineRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_UpdatePipelineRequestProto_descriptor,
    new java.lang.String[] { "ClientName", "OldBlock", "NewBlock", "NewNodes", "StorageIDs", });
internal_static_hadoop_hdfs_UpdatePipelineResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(138);
internal_static_hadoop_hdfs_UpdatePipelineResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_UpdatePipelineResponseProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_SetBalancerBandwidthRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(139);
internal_static_hadoop_hdfs_SetBalancerBandwidthRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_SetBalancerBandwidthRequestProto_descriptor,
    new java.lang.String[] { "Bandwidth", });
internal_static_hadoop_hdfs_SetBalancerBandwidthResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(140);
internal_static_hadoop_hdfs_SetBalancerBandwidthResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_SetBalancerBandwidthResponseProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_GetDataEncryptionKeyRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(141);
internal_static_hadoop_hdfs_GetDataEncryptionKeyRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetDataEncryptionKeyRequestProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_GetDataEncryptionKeyResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(142);
internal_static_hadoop_hdfs_GetDataEncryptionKeyResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetDataEncryptionKeyResponseProto_descriptor,
    new java.lang.String[] { "DataEncryptionKey", });
internal_static_hadoop_hdfs_CreateSnapshotRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(143);
internal_static_hadoop_hdfs_CreateSnapshotRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_CreateSnapshotRequestProto_descriptor,
    new java.lang.String[] { "SnapshotRoot", "SnapshotName", });
internal_static_hadoop_hdfs_CreateSnapshotResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(144);
internal_static_hadoop_hdfs_CreateSnapshotResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_CreateSnapshotResponseProto_descriptor,
    new java.lang.String[] { "SnapshotPath", });
internal_static_hadoop_hdfs_RenameSnapshotRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(145);
internal_static_hadoop_hdfs_RenameSnapshotRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_RenameSnapshotRequestProto_descriptor,
    new java.lang.String[] { "SnapshotRoot", "SnapshotOldName", "SnapshotNewName", });
internal_static_hadoop_hdfs_RenameSnapshotResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(146);
internal_static_hadoop_hdfs_RenameSnapshotResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_RenameSnapshotResponseProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_AllowSnapshotRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(147);
internal_static_hadoop_hdfs_AllowSnapshotRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_AllowSnapshotRequestProto_descriptor,
    new java.lang.String[] { "SnapshotRoot", });
internal_static_hadoop_hdfs_AllowSnapshotResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(148);
internal_static_hadoop_hdfs_AllowSnapshotResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_AllowSnapshotResponseProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_DisallowSnapshotRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(149);
internal_static_hadoop_hdfs_DisallowSnapshotRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_DisallowSnapshotRequestProto_descriptor,
    new java.lang.String[] { "SnapshotRoot", });
internal_static_hadoop_hdfs_DisallowSnapshotResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(150);
internal_static_hadoop_hdfs_DisallowSnapshotResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_DisallowSnapshotResponseProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_DeleteSnapshotRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(151);
internal_static_hadoop_hdfs_DeleteSnapshotRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_DeleteSnapshotRequestProto_descriptor,
    new java.lang.String[] { "SnapshotRoot", "SnapshotName", });
internal_static_hadoop_hdfs_DeleteSnapshotResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(152);
internal_static_hadoop_hdfs_DeleteSnapshotResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_DeleteSnapshotResponseProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_CheckAccessRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(153);
internal_static_hadoop_hdfs_CheckAccessRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_CheckAccessRequestProto_descriptor,
    new java.lang.String[] { "Path", "Mode", });
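// Editor's note (illustrative, not protoc output): taking a snapshot of a
// directory previously made snapshottable through the AllowSnapshot pair
// above; both strings are hypothetical:
//
//   CreateSnapshotRequestProto req = CreateSnapshotRequestProto.newBuilder()
//       .setSnapshotRoot("/user/alice/project")
//       .setSnapshotName("before-migration")
//       .build();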
internal_static_hadoop_hdfs_CheckAccessResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(154);
internal_static_hadoop_hdfs_CheckAccessResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_CheckAccessResponseProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_GetCurrentEditLogTxidRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(155);
internal_static_hadoop_hdfs_GetCurrentEditLogTxidRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetCurrentEditLogTxidRequestProto_descriptor,
    new java.lang.String[] { });
internal_static_hadoop_hdfs_GetCurrentEditLogTxidResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(156);
internal_static_hadoop_hdfs_GetCurrentEditLogTxidResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetCurrentEditLogTxidResponseProto_descriptor,
    new java.lang.String[] { "Txid", });
internal_static_hadoop_hdfs_GetEditsFromTxidRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(157);
internal_static_hadoop_hdfs_GetEditsFromTxidRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetEditsFromTxidRequestProto_descriptor,
    new java.lang.String[] { "Txid", });
internal_static_hadoop_hdfs_GetEditsFromTxidResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(158);
internal_static_hadoop_hdfs_GetEditsFromTxidResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_GetEditsFromTxidResponseProto_descriptor,
    new java.lang.String[] { "EventsList", });
internal_static_hadoop_hdfs_ListOpenFilesRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(159);
internal_static_hadoop_hdfs_ListOpenFilesRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_ListOpenFilesRequestProto_descriptor,
    new java.lang.String[] { "Id", "Types", "Path", });
internal_static_hadoop_hdfs_OpenFilesBatchResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(160);
internal_static_hadoop_hdfs_OpenFilesBatchResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_OpenFilesBatchResponseProto_descriptor,
    new java.lang.String[] { "Id", "Path", "ClientName", "ClientMachine", });
internal_static_hadoop_hdfs_ListOpenFilesResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(161);
internal_static_hadoop_hdfs_ListOpenFilesResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_ListOpenFilesResponseProto_descriptor,
    new java.lang.String[] { "Entries", "HasMore", "Types", });
internal_static_hadoop_hdfs_SatisfyStoragePolicyRequestProto_descriptor =
  getDescriptor().getMessageTypes().get(162);
internal_static_hadoop_hdfs_SatisfyStoragePolicyRequestProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_SatisfyStoragePolicyRequestProto_descriptor,
    new java.lang.String[] { "Src", });
internal_static_hadoop_hdfs_SatisfyStoragePolicyResponseProto_descriptor =
  getDescriptor().getMessageTypes().get(163);
internal_static_hadoop_hdfs_SatisfyStoragePolicyResponseProto_fieldAccessorTable = new
  io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.FieldAccessorTable(
    internal_static_hadoop_hdfs_SatisfyStoragePolicyResponseProto_descriptor,
    new java.lang.String[] { });
        return null;
      }
    };
    io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FileDescriptor
      .internalBuildGeneratedFileFrom(descriptorData,
        new io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FileDescriptor[] {
            org.apache.hadoop.security.proto.SecurityProtos.getDescriptor(),
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
            org.apache.hadoop.hdfs.protocol.proto.AclProtos.getDescriptor(),
            org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.getDescriptor(),
            org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.getDescriptor(),
            org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.getDescriptor(),
            org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.getDescriptor(),
        }, assigner);
  }

  // @@protoc_insertion_point(outer_class_scope)
}
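// ---------------------------------------------------------------------------
// Editor's note: the class below is an illustrative addition, NOT part of the
// protoc output above. It is a minimal, self-contained sketch of how the file
// descriptor wired up by the static initializer can be inspected reflectively,
// e.g. to dump the wire schema of one ClientNamenodeProtocol message without
// constructing an instance. "RenameRequestProto" is simply one of the message
// names registered above; any other would work the same way.
class ClientNamenodeProtocolProtosDescriptorDump {
  public static void main(String[] args) {
    io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.Descriptor d =
        ClientNamenodeProtocolProtos.getDescriptor()
            .findMessageTypeByName("RenameRequestProto");
    // Prints each field's tag number, name, and declared type,
    // e.g. "1: src (STRING)".
    for (io.prestosql.hadoop.$internal.com.google.protobuf.Descriptors.FieldDescriptor f
        : d.getFields()) {
      System.out.println(f.getNumber() + ": " + f.getName() + " (" + f.getType() + ")");
    }
  }
}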



