// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: ClientDatanodeProtocol.proto
package org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto;
public final class ClientDatanodeProtocolProtos {
private ClientDatanodeProtocolProtos() {}
public static void registerAllExtensions(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite registry) {
}
public static void registerAllExtensions(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry registry) {
registerAllExtensions(
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite) registry);
}
public interface GetReplicaVisibleLengthRequestProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.GetReplicaVisibleLengthRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
boolean hasBlock();
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock();
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder();
}
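// Editorial note (not generated code): the *OrBuilder interface above is a
// read-only view implemented by both the immutable message and its Builder,
// so helper code can accept either form. A minimal sketch, using a
// hypothetical helper name:
//
//   static boolean hasUsableBlock(GetReplicaVisibleLengthRequestProtoOrBuilder r) {
//     return r.hasBlock();  // works for a built message or an in-progress builder
//   }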
/**
* <pre>
**
* block - block for which visible length is requested
* </pre>
*
* Protobuf type {@code hadoop.hdfs.GetReplicaVisibleLengthRequestProto}
*/
public static final class GetReplicaVisibleLengthRequestProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.GetReplicaVisibleLengthRequestProto)
GetReplicaVisibleLengthRequestProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use GetReplicaVisibleLengthRequestProto.newBuilder() to construct.
private GetReplicaVisibleLengthRequestProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private GetReplicaVisibleLengthRequestProto() {
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetReplicaVisibleLengthRequestProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) != 0)) {
subBuilder = block_.toBuilder();
}
block_ = input.readMessage(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(block_);
block_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.Builder.class);
}
private int bitField0_;
public static final int BLOCK_FIELD_NUMBER = 1;
private org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
return block_ == null ? org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
return block_ == null ? org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!getBlock().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeMessage(1, getBlock());
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(1, getBlock());
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto) obj;
if (hasBlock() != other.hasBlock()) return false;
if (hasBlock()) {
if (!getBlock()
.equals(other.getBlock())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasBlock()) {
hash = (37 * hash) + BLOCK_FIELD_NUMBER;
hash = (53 * hash) + getBlock().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* <pre>
**
* block - block for which visible length is requested
* </pre>
*
* Protobuf type {@code hadoop.hdfs.GetReplicaVisibleLengthRequestProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.GetReplicaVisibleLengthRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getBlockFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
if (blockBuilder_ == null) {
block_ = null;
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
if (blockBuilder_ == null) {
result.block_ = block_;
} else {
result.block_ = blockBuilder_.build();
}
to_bitField0_ |= 0x00000001;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.getDefaultInstance()) return this;
if (other.hasBlock()) {
mergeBlock(other.getBlock());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasBlock()) {
return false;
}
if (!getBlock().isInitialized()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
private org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
if (blockBuilder_ == null) {
return block_ == null ? org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
} else {
return blockBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder setBlock(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
block_ = value;
onChanged();
} else {
blockBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder setBlock(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
if (blockBuilder_ == null) {
block_ = builderForValue.build();
onChanged();
} else {
blockBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder mergeBlock(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (((bitField0_ & 0x00000001) != 0) &&
block_ != null &&
block_ != org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
block_ =
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
} else {
block_ = value;
}
onChanged();
} else {
blockBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder clearBlock() {
if (blockBuilder_ == null) {
block_ = null;
onChanged();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getBlockFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
if (blockBuilder_ != null) {
return blockBuilder_.getMessageOrBuilder();
} else {
return block_ == null ?
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
private org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
getBlockFieldBuilder() {
if (blockBuilder_ == null) {
blockBuilder_ = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
getBlock(),
getParentForChildren(),
isClean());
block_ = null;
}
return blockBuilder_;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetReplicaVisibleLengthRequestProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetReplicaVisibleLengthRequestProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetReplicaVisibleLengthRequestProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<GetReplicaVisibleLengthRequestProto>() {
@java.lang.Override
public GetReplicaVisibleLengthRequestProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new GetReplicaVisibleLengthRequestProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetReplicaVisibleLengthRequestProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetReplicaVisibleLengthRequestProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
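// Usage sketch (editorial, not part of the generated code): building,
// serializing and re-parsing a GetReplicaVisibleLengthRequestProto. It assumes
// a populated HdfsProtos.ExtendedBlockProto named "block" is already in scope;
// the ExtendedBlockProto setters themselves are not shown.
//
//   GetReplicaVisibleLengthRequestProto request =
//       GetReplicaVisibleLengthRequestProto.newBuilder()
//           .setBlock(block)          // required field; build() throws if unset
//           .build();
//   byte[] wire = request.toByteArray();
//   GetReplicaVisibleLengthRequestProto roundTripped =
//       GetReplicaVisibleLengthRequestProto.parseFrom(wire);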
public interface GetReplicaVisibleLengthResponseProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.GetReplicaVisibleLengthResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required uint64 length = 1;
*/
boolean hasLength();
/**
* required uint64 length = 1;
*/
long getLength();
}
/**
* <pre>
**
* length - visible length of the block
* </pre>
*
* Protobuf type {@code hadoop.hdfs.GetReplicaVisibleLengthResponseProto}
*/
public static final class GetReplicaVisibleLengthResponseProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.GetReplicaVisibleLengthResponseProto)
GetReplicaVisibleLengthResponseProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use GetReplicaVisibleLengthResponseProto.newBuilder() to construct.
private GetReplicaVisibleLengthResponseProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private GetReplicaVisibleLengthResponseProto() {
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetReplicaVisibleLengthResponseProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
bitField0_ |= 0x00000001;
length_ = input.readUInt64();
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.Builder.class);
}
private int bitField0_;
public static final int LENGTH_FIELD_NUMBER = 1;
private long length_;
/**
* required uint64 length = 1;
*/
public boolean hasLength() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required uint64 length = 1;
*/
public long getLength() {
return length_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasLength()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeUInt64(1, length_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(1, length_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto) obj;
if (hasLength() != other.hasLength()) return false;
if (hasLength()) {
if (getLength()
!= other.getLength()) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasLength()) {
hash = (37 * hash) + LENGTH_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getLength());
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* <pre>
**
* length - visible length of the block
* </pre>
*
* Protobuf type {@code hadoop.hdfs.GetReplicaVisibleLengthResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.GetReplicaVisibleLengthResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
length_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.length_ = length_;
to_bitField0_ |= 0x00000001;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance()) return this;
if (other.hasLength()) {
setLength(other.getLength());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasLength()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private long length_ ;
/**
* required uint64 length = 1;
*/
public boolean hasLength() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required uint64 length = 1;
*/
public long getLength() {
return length_;
}
/**
* required uint64 length = 1;
*/
public Builder setLength(long value) {
bitField0_ |= 0x00000001;
length_ = value;
onChanged();
return this;
}
/**
* required uint64 length = 1;
*/
public Builder clearLength() {
bitField0_ = (bitField0_ & ~0x00000001);
length_ = 0L;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetReplicaVisibleLengthResponseProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetReplicaVisibleLengthResponseProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetReplicaVisibleLengthResponseProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<GetReplicaVisibleLengthResponseProto>() {
@java.lang.Override
public GetReplicaVisibleLengthResponseProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new GetReplicaVisibleLengthResponseProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetReplicaVisibleLengthResponseProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetReplicaVisibleLengthResponseProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
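// Usage sketch (editorial, not part of the generated code): a
// GetReplicaVisibleLengthResponseProto carries a single required uint64. The
// snippet below is a minimal illustration of setting and reading it; the value
// 1024L is arbitrary.
//
//   GetReplicaVisibleLengthResponseProto response =
//       GetReplicaVisibleLengthResponseProto.newBuilder()
//           .setLength(1024L)         // required field; build() throws if unset
//           .build();
//   long visibleLength = response.hasLength() ? response.getLength() : 0L;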
public interface RefreshNamenodesRequestProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.RefreshNamenodesRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
}
/**
* <pre>
**
* void request
* </pre>
*
* Protobuf type {@code hadoop.hdfs.RefreshNamenodesRequestProto}
*/
public static final class RefreshNamenodesRequestProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.RefreshNamenodesRequestProto)
RefreshNamenodesRequestProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use RefreshNamenodesRequestProto.newBuilder() to construct.
private RefreshNamenodesRequestProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private RefreshNamenodesRequestProto() {
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private RefreshNamenodesRequestProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.Builder.class);
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto) obj;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* <pre>
**
* void request
* </pre>
*
* Protobuf type {@code hadoop.hdfs.RefreshNamenodesRequestProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.RefreshNamenodesRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto(this);
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.RefreshNamenodesRequestProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.RefreshNamenodesRequestProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<RefreshNamenodesRequestProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<RefreshNamenodesRequestProto>() {
@java.lang.Override
public RefreshNamenodesRequestProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new RefreshNamenodesRequestProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<RefreshNamenodesRequestProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<RefreshNamenodesRequestProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
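// Editor-added illustration (not part of the generated code): a minimal sketch of how a
// caller might round-trip the empty RefreshNamenodesRequestProto. The method name is a
// made-up placeholder; only the generated builder/parse API shown above is assumed.
private static RefreshNamenodesRequestProto exampleRefreshNamenodesRequestRoundTrip()
    throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
  // The request carries no fields, so an empty builder is sufficient.
  RefreshNamenodesRequestProto request = RefreshNamenodesRequestProto.newBuilder().build();
  // Serialize to bytes and parse back, roughly as an RPC layer would.
  byte[] wire = request.toByteArray();
  return RefreshNamenodesRequestProto.parseFrom(wire);
}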
public interface RefreshNamenodesResponseProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.RefreshNamenodesResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
}
/**
* <pre>
**
* void response
* </pre>
*
* Protobuf type {@code hadoop.hdfs.RefreshNamenodesResponseProto}
*/
public static final class RefreshNamenodesResponseProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.RefreshNamenodesResponseProto)
RefreshNamenodesResponseProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use RefreshNamenodesResponseProto.newBuilder() to construct.
private RefreshNamenodesResponseProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private RefreshNamenodesResponseProto() {
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private RefreshNamenodesResponseProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.Builder.class);
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto) obj;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* <pre>
**
* void response
* </pre>
*
* Protobuf type {@code hadoop.hdfs.RefreshNamenodesResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.RefreshNamenodesResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto(this);
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.RefreshNamenodesResponseProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.RefreshNamenodesResponseProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<RefreshNamenodesResponseProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<RefreshNamenodesResponseProto>() {
@java.lang.Override
public RefreshNamenodesResponseProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new RefreshNamenodesResponseProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<RefreshNamenodesResponseProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<RefreshNamenodesResponseProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
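// Editor-added illustration (not part of the generated code): a minimal sketch, assuming an
// in-memory stream, of how the empty response might be written and read back with the
// delimited helpers generated above. The method name is hypothetical.
private static RefreshNamenodesResponseProto exampleRefreshNamenodesResponseDelimited()
    throws java.io.IOException {
  RefreshNamenodesResponseProto response = RefreshNamenodesResponseProto.newBuilder().build();
  java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
  // writeDelimitedTo prefixes the message with its varint-encoded length.
  response.writeDelimitedTo(out);
  java.io.ByteArrayInputStream in = new java.io.ByteArrayInputStream(out.toByteArray());
  return RefreshNamenodesResponseProto.parseDelimitedFrom(in);
}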
public interface DeleteBlockPoolRequestProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.DeleteBlockPoolRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required string blockPool = 1;
*/
boolean hasBlockPool();
/**
* required string blockPool = 1;
*/
java.lang.String getBlockPool();
/**
* required string blockPool = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getBlockPoolBytes();
/**
* required bool force = 2;
*/
boolean hasForce();
/**
* required bool force = 2;
*/
boolean getForce();
}
/**
* <pre>
**
* blockPool - block pool to be deleted
* force - if false, delete the block pool only if it is empty.
*         if true, delete the block pool even if it has blocks.
* </pre>
*
* Protobuf type {@code hadoop.hdfs.DeleteBlockPoolRequestProto}
*/
public static final class DeleteBlockPoolRequestProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.DeleteBlockPoolRequestProto)
DeleteBlockPoolRequestProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use DeleteBlockPoolRequestProto.newBuilder() to construct.
private DeleteBlockPoolRequestProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private DeleteBlockPoolRequestProto() {
blockPool_ = "";
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DeleteBlockPoolRequestProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000001;
blockPool_ = bs;
break;
}
case 16: {
bitField0_ |= 0x00000002;
force_ = input.readBool();
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.Builder.class);
}
private int bitField0_;
public static final int BLOCKPOOL_FIELD_NUMBER = 1;
private volatile java.lang.Object blockPool_;
/**
* required string blockPool = 1;
*/
public boolean hasBlockPool() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required string blockPool = 1;
*/
public java.lang.String getBlockPool() {
java.lang.Object ref = blockPool_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
blockPool_ = s;
}
return s;
}
}
/**
* required string blockPool = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getBlockPoolBytes() {
java.lang.Object ref = blockPool_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPool_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
public static final int FORCE_FIELD_NUMBER = 2;
private boolean force_;
/**
* required bool force = 2;
*/
public boolean hasForce() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required bool force = 2;
*/
public boolean getForce() {
return force_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasBlockPool()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasForce()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, blockPool_);
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeBool(2, force_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, blockPool_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBoolSize(2, force_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto) obj;
if (hasBlockPool() != other.hasBlockPool()) return false;
if (hasBlockPool()) {
if (!getBlockPool()
.equals(other.getBlockPool())) return false;
}
if (hasForce() != other.hasForce()) return false;
if (hasForce()) {
if (getForce()
!= other.getForce()) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasBlockPool()) {
hash = (37 * hash) + BLOCKPOOL_FIELD_NUMBER;
hash = (53 * hash) + getBlockPool().hashCode();
}
if (hasForce()) {
hash = (37 * hash) + FORCE_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
getForce());
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* <pre>
**
* blockPool - block pool to be deleted
* force - if false, delete the block pool only if it is empty.
*         if true, delete the block pool even if it has blocks.
* </pre>
*
* Protobuf type {@code hadoop.hdfs.DeleteBlockPoolRequestProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.DeleteBlockPoolRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
blockPool_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
force_ = false;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
to_bitField0_ |= 0x00000001;
}
result.blockPool_ = blockPool_;
if (((from_bitField0_ & 0x00000002) != 0)) {
result.force_ = force_;
to_bitField0_ |= 0x00000002;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.getDefaultInstance()) return this;
if (other.hasBlockPool()) {
bitField0_ |= 0x00000001;
blockPool_ = other.blockPool_;
onChanged();
}
if (other.hasForce()) {
setForce(other.getForce());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasBlockPool()) {
return false;
}
if (!hasForce()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private java.lang.Object blockPool_ = "";
/**
* required string blockPool = 1;
*/
public boolean hasBlockPool() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required string blockPool = 1;
*/
public java.lang.String getBlockPool() {
java.lang.Object ref = blockPool_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
blockPool_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string blockPool = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getBlockPoolBytes() {
java.lang.Object ref = blockPool_;
if (ref instanceof String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
blockPool_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* required string blockPool = 1;
*/
public Builder setBlockPool(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
blockPool_ = value;
onChanged();
return this;
}
/**
* required string blockPool = 1;
*/
public Builder clearBlockPool() {
bitField0_ = (bitField0_ & ~0x00000001);
blockPool_ = getDefaultInstance().getBlockPool();
onChanged();
return this;
}
/**
* required string blockPool = 1;
*/
public Builder setBlockPoolBytes(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
blockPool_ = value;
onChanged();
return this;
}
private boolean force_ ;
/**
* required bool force = 2;
*/
public boolean hasForce() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required bool force = 2;
*/
public boolean getForce() {
return force_;
}
/**
* required bool force = 2;
*/
public Builder setForce(boolean value) {
bitField0_ |= 0x00000002;
force_ = value;
onChanged();
return this;
}
/**
* required bool force = 2;
*/
public Builder clearForce() {
bitField0_ = (bitField0_ & ~0x00000002);
force_ = false;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DeleteBlockPoolRequestProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DeleteBlockPoolRequestProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<DeleteBlockPoolRequestProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<DeleteBlockPoolRequestProto>() {
@java.lang.Override
public DeleteBlockPoolRequestProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new DeleteBlockPoolRequestProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<DeleteBlockPoolRequestProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<DeleteBlockPoolRequestProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
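// Usage sketch (not part of the generated protocol buffer output): building and round-tripping a
// DeleteBlockPoolRequestProto through the Builder methods above. The block pool ID is an
// illustrative value only; both fields are required, so build() throws if either is unset.
//
//   ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request =
//       ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.newBuilder()
//           .setBlockPool("BP-1234567-127.0.0.1-1400000000000")  // required string blockPool = 1
//           .setForce(false)                                      // required bool force = 2
//           .build();
//   byte[] bytes = request.toByteArray();
//   ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parsed =
//       ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.parseFrom(bytes);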
public interface DeleteBlockPoolResponseProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.DeleteBlockPoolResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
}
/**
*
**
* void response
*
*
* Protobuf type {@code hadoop.hdfs.DeleteBlockPoolResponseProto}
*/
public static final class DeleteBlockPoolResponseProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.DeleteBlockPoolResponseProto)
DeleteBlockPoolResponseProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use DeleteBlockPoolResponseProto.newBuilder() to construct.
private DeleteBlockPoolResponseProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private DeleteBlockPoolResponseProto() {
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DeleteBlockPoolResponseProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.Builder.class);
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto) obj;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
**
* void response
*
*
* Protobuf type {@code hadoop.hdfs.DeleteBlockPoolResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.DeleteBlockPoolResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto(this);
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DeleteBlockPoolResponseProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DeleteBlockPoolResponseProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<DeleteBlockPoolResponseProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<DeleteBlockPoolResponseProto>() {
@java.lang.Override
public DeleteBlockPoolResponseProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new DeleteBlockPoolResponseProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<DeleteBlockPoolResponseProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<DeleteBlockPoolResponseProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
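// Usage sketch (not part of the generated protocol buffer output): DeleteBlockPoolResponseProto
// declares no fields, so the "void response" reduces to the default instance; a fresh instance
// with no unknown fields serializes to zero bytes.
//
//   ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto response =
//       ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.newBuilder().build();
//   assert response.getSerializedSize() == 0;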
public interface GetBlockLocalPathInfoRequestProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.GetBlockLocalPathInfoRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
boolean hasBlock();
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock();
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder();
/**
* required .hadoop.common.TokenProto token = 2;
*/
boolean hasToken();
/**
* required .hadoop.common.TokenProto token = 2;
*/
org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto getToken();
/**
* required .hadoop.common.TokenProto token = 2;
*/
org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getTokenOrBuilder();
}
/**
*
**
* Gets the file information where block and its metadata is stored
* block - block for which path information is being requested
* token - block token
* This message is deprecated in favor of file descriptor passing.
*
*
* Protobuf type {@code hadoop.hdfs.GetBlockLocalPathInfoRequestProto}
*/
public static final class GetBlockLocalPathInfoRequestProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.GetBlockLocalPathInfoRequestProto)
GetBlockLocalPathInfoRequestProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use GetBlockLocalPathInfoRequestProto.newBuilder() to construct.
private GetBlockLocalPathInfoRequestProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private GetBlockLocalPathInfoRequestProto() {
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetBlockLocalPathInfoRequestProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) != 0)) {
subBuilder = block_.toBuilder();
}
block_ = input.readMessage(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(block_);
block_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) != 0)) {
subBuilder = token_.toBuilder();
}
token_ = input.readMessage(org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(token_);
token_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.Builder.class);
}
private int bitField0_;
public static final int BLOCK_FIELD_NUMBER = 1;
private org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
return block_ == null ? org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
return block_ == null ? org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
}
public static final int TOKEN_FIELD_NUMBER = 2;
private org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto token_;
/**
* required .hadoop.common.TokenProto token = 2;
*/
public boolean hasToken() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto getToken() {
return token_ == null ? org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : token_;
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getTokenOrBuilder() {
return token_ == null ? org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : token_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasToken()) {
memoizedIsInitialized = 0;
return false;
}
if (!getBlock().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (!getToken().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeMessage(1, getBlock());
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeMessage(2, getToken());
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(1, getBlock());
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(2, getToken());
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto) obj;
if (hasBlock() != other.hasBlock()) return false;
if (hasBlock()) {
if (!getBlock()
.equals(other.getBlock())) return false;
}
if (hasToken() != other.hasToken()) return false;
if (hasToken()) {
if (!getToken()
.equals(other.getToken())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasBlock()) {
hash = (37 * hash) + BLOCK_FIELD_NUMBER;
hash = (53 * hash) + getBlock().hashCode();
}
if (hasToken()) {
hash = (37 * hash) + TOKEN_FIELD_NUMBER;
hash = (53 * hash) + getToken().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
**
* Gets the file information where block and its metadata is stored
* block - block for which path information is being requested
* token - block token
* This message is deprecated in favor of file descriptor passing.
*
*
* Protobuf type {@code hadoop.hdfs.GetBlockLocalPathInfoRequestProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.GetBlockLocalPathInfoRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getBlockFieldBuilder();
getTokenFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
if (blockBuilder_ == null) {
block_ = null;
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (tokenBuilder_ == null) {
token_ = null;
} else {
tokenBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
if (blockBuilder_ == null) {
result.block_ = block_;
} else {
result.block_ = blockBuilder_.build();
}
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
if (tokenBuilder_ == null) {
result.token_ = token_;
} else {
result.token_ = tokenBuilder_.build();
}
to_bitField0_ |= 0x00000002;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.getDefaultInstance()) return this;
if (other.hasBlock()) {
mergeBlock(other.getBlock());
}
if (other.hasToken()) {
mergeToken(other.getToken());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasBlock()) {
return false;
}
if (!hasToken()) {
return false;
}
if (!getBlock().isInitialized()) {
return false;
}
if (!getToken().isInitialized()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
private org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
if (blockBuilder_ == null) {
return block_ == null ? org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
} else {
return blockBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder setBlock(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
block_ = value;
onChanged();
} else {
blockBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder setBlock(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
if (blockBuilder_ == null) {
block_ = builderForValue.build();
onChanged();
} else {
blockBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder mergeBlock(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (((bitField0_ & 0x00000001) != 0) &&
block_ != null &&
block_ != org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
block_ =
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
} else {
block_ = value;
}
onChanged();
} else {
blockBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder clearBlock() {
if (blockBuilder_ == null) {
block_ = null;
onChanged();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getBlockFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
if (blockBuilder_ != null) {
return blockBuilder_.getMessageOrBuilder();
} else {
return block_ == null ?
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
private org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
getBlockFieldBuilder() {
if (blockBuilder_ == null) {
blockBuilder_ = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
getBlock(),
getParentForChildren(),
isClean());
block_ = null;
}
return blockBuilder_;
}
private org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto token_;
private org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> tokenBuilder_;
/**
* required .hadoop.common.TokenProto token = 2;
*/
public boolean hasToken() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto getToken() {
if (tokenBuilder_ == null) {
return token_ == null ? org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : token_;
} else {
return tokenBuilder_.getMessage();
}
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public Builder setToken(org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (tokenBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
token_ = value;
onChanged();
} else {
tokenBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public Builder setToken(
org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
if (tokenBuilder_ == null) {
token_ = builderForValue.build();
onChanged();
} else {
tokenBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public Builder mergeToken(org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
if (tokenBuilder_ == null) {
if (((bitField0_ & 0x00000002) != 0) &&
token_ != null &&
token_ != org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance()) {
token_ =
org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.newBuilder(token_).mergeFrom(value).buildPartial();
} else {
token_ = value;
}
onChanged();
} else {
tokenBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public Builder clearToken() {
if (tokenBuilder_ == null) {
token_ = null;
onChanged();
} else {
tokenBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder getTokenBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getTokenFieldBuilder().getBuilder();
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getTokenOrBuilder() {
if (tokenBuilder_ != null) {
return tokenBuilder_.getMessageOrBuilder();
} else {
return token_ == null ?
org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : token_;
}
}
/**
* required .hadoop.common.TokenProto token = 2;
*/
private org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>
getTokenFieldBuilder() {
if (tokenBuilder_ == null) {
tokenBuilder_ = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>(
getToken(),
getParentForChildren(),
isClean());
token_ = null;
}
return tokenBuilder_;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetBlockLocalPathInfoRequestProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetBlockLocalPathInfoRequestProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetBlockLocalPathInfoRequestProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<GetBlockLocalPathInfoRequestProto>() {
@java.lang.Override
public GetBlockLocalPathInfoRequestProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new GetBlockLocalPathInfoRequestProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetBlockLocalPathInfoRequestProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetBlockLocalPathInfoRequestProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
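// Usage sketch (not part of the generated protocol buffer output): assembling a
// GetBlockLocalPathInfoRequestProto. The extendedBlock and blockToken variables are assumed to
// have been built elsewhere from HdfsProtos.ExtendedBlockProto and SecurityProtos.TokenProto;
// both fields are required, so build() throws if either sub-message is missing or uninitialized.
//
//   ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request =
//       ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.newBuilder()
//           .setBlock(extendedBlock)   // required .hadoop.hdfs.ExtendedBlockProto block = 1
//           .setToken(blockToken)      // required .hadoop.common.TokenProto token = 2
//           .build();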
public interface GetBlockLocalPathInfoResponseProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.GetBlockLocalPathInfoResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
boolean hasBlock();
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock();
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder();
/**
* required string localPath = 2;
*/
boolean hasLocalPath();
/**
* required string localPath = 2;
*/
java.lang.String getLocalPath();
/**
* required string localPath = 2;
*/
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getLocalPathBytes();
/**
* required string localMetaPath = 3;
*/
boolean hasLocalMetaPath();
/**
* required string localMetaPath = 3;
*/
java.lang.String getLocalMetaPath();
/**
* required string localMetaPath = 3;
*/
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getLocalMetaPathBytes();
}
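// Usage sketch (not part of the generated protocol buffer output): reading a parsed
// GetBlockLocalPathInfoResponseProto through the accessors declared in the interface above; the
// response variable is assumed to have been obtained from an RPC reply.
//
//   if (response.hasLocalPath() && response.hasLocalMetaPath()) {
//     String dataFile = response.getLocalPath();      // required string localPath = 2
//     String metaFile = response.getLocalMetaPath();  // required string localMetaPath = 3
//     HdfsProtos.ExtendedBlockProto block = response.getBlock();
//   }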
/**
*
**
* block - block for which file path information is being returned
* localPath - file path where the block data is stored
* localMetaPath - file path where the block meta data is stored
* This message is deprecated in favor of file descriptor passing.
*
*
* Protobuf type {@code hadoop.hdfs.GetBlockLocalPathInfoResponseProto}
*/
public static final class GetBlockLocalPathInfoResponseProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.GetBlockLocalPathInfoResponseProto)
GetBlockLocalPathInfoResponseProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use GetBlockLocalPathInfoResponseProto.newBuilder() to construct.
private GetBlockLocalPathInfoResponseProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private GetBlockLocalPathInfoResponseProto() {
localPath_ = "";
localMetaPath_ = "";
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetBlockLocalPathInfoResponseProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) != 0)) {
subBuilder = block_.toBuilder();
}
block_ = input.readMessage(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(block_);
block_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000002;
localPath_ = bs;
break;
}
case 26: {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000004;
localMetaPath_ = bs;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.Builder.class);
}
private int bitField0_;
public static final int BLOCK_FIELD_NUMBER = 1;
private org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
return block_ == null ? org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
return block_ == null ? org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
}
public static final int LOCALPATH_FIELD_NUMBER = 2;
private volatile java.lang.Object localPath_;
/**
* required string localPath = 2;
*/
public boolean hasLocalPath() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required string localPath = 2;
*/
public java.lang.String getLocalPath() {
java.lang.Object ref = localPath_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
localPath_ = s;
}
return s;
}
}
/**
* required string localPath = 2;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getLocalPathBytes() {
java.lang.Object ref = localPath_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
localPath_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
public static final int LOCALMETAPATH_FIELD_NUMBER = 3;
private volatile java.lang.Object localMetaPath_;
/**
* required string localMetaPath = 3;
*/
public boolean hasLocalMetaPath() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* required string localMetaPath = 3;
*/
public java.lang.String getLocalMetaPath() {
java.lang.Object ref = localMetaPath_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
localMetaPath_ = s;
}
return s;
}
}
/**
* required string localMetaPath = 3;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getLocalMetaPathBytes() {
java.lang.Object ref = localMetaPath_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
localMetaPath_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasBlock()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasLocalPath()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasLocalMetaPath()) {
memoizedIsInitialized = 0;
return false;
}
if (!getBlock().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeMessage(1, getBlock());
}
if (((bitField0_ & 0x00000002) != 0)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, localPath_);
}
if (((bitField0_ & 0x00000004) != 0)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, localMetaPath_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(1, getBlock());
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, localPath_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, localMetaPath_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto) obj;
if (hasBlock() != other.hasBlock()) return false;
if (hasBlock()) {
if (!getBlock()
.equals(other.getBlock())) return false;
}
if (hasLocalPath() != other.hasLocalPath()) return false;
if (hasLocalPath()) {
if (!getLocalPath()
.equals(other.getLocalPath())) return false;
}
if (hasLocalMetaPath() != other.hasLocalMetaPath()) return false;
if (hasLocalMetaPath()) {
if (!getLocalMetaPath()
.equals(other.getLocalMetaPath())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasBlock()) {
hash = (37 * hash) + BLOCK_FIELD_NUMBER;
hash = (53 * hash) + getBlock().hashCode();
}
if (hasLocalPath()) {
hash = (37 * hash) + LOCALPATH_FIELD_NUMBER;
hash = (53 * hash) + getLocalPath().hashCode();
}
if (hasLocalMetaPath()) {
hash = (37 * hash) + LOCALMETAPATH_FIELD_NUMBER;
hash = (53 * hash) + getLocalMetaPath().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
**
* block - block for which file path information is being returned
* localPath - file path where the block data is stored
* localMetaPath - file path where the block meta data is stored
* This message is deprecated in favor of file descriptor passing.
*
*
* Protobuf type {@code hadoop.hdfs.GetBlockLocalPathInfoResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.GetBlockLocalPathInfoResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getBlockFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
if (blockBuilder_ == null) {
block_ = null;
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
localPath_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
localMetaPath_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
if (blockBuilder_ == null) {
result.block_ = block_;
} else {
result.block_ = blockBuilder_.build();
}
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
to_bitField0_ |= 0x00000002;
}
result.localPath_ = localPath_;
if (((from_bitField0_ & 0x00000004) != 0)) {
to_bitField0_ |= 0x00000004;
}
result.localMetaPath_ = localMetaPath_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance()) return this;
if (other.hasBlock()) {
mergeBlock(other.getBlock());
}
if (other.hasLocalPath()) {
bitField0_ |= 0x00000002;
localPath_ = other.localPath_;
onChanged();
}
if (other.hasLocalMetaPath()) {
bitField0_ |= 0x00000004;
localMetaPath_ = other.localMetaPath_;
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasBlock()) {
return false;
}
if (!hasLocalPath()) {
return false;
}
if (!hasLocalMetaPath()) {
return false;
}
if (!getBlock().isInitialized()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
private org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_;
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public boolean hasBlock() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
if (blockBuilder_ == null) {
return block_ == null ? org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
} else {
return blockBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder setBlock(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
block_ = value;
onChanged();
} else {
blockBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder setBlock(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
if (blockBuilder_ == null) {
block_ = builderForValue.build();
onChanged();
} else {
blockBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder mergeBlock(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
if (blockBuilder_ == null) {
if (((bitField0_ & 0x00000001) != 0) &&
block_ != null &&
block_ != org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
block_ =
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
} else {
block_ = value;
}
onChanged();
} else {
blockBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public Builder clearBlock() {
if (blockBuilder_ == null) {
block_ = null;
onChanged();
} else {
blockBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getBlockFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
if (blockBuilder_ != null) {
return blockBuilder_.getMessageOrBuilder();
} else {
return block_ == null ?
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
}
}
/**
* required .hadoop.hdfs.ExtendedBlockProto block = 1;
*/
private org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
getBlockFieldBuilder() {
if (blockBuilder_ == null) {
blockBuilder_ = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
getBlock(),
getParentForChildren(),
isClean());
block_ = null;
}
return blockBuilder_;
}
private java.lang.Object localPath_ = "";
/**
* required string localPath = 2;
*/
public boolean hasLocalPath() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* required string localPath = 2;
*/
public java.lang.String getLocalPath() {
java.lang.Object ref = localPath_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
localPath_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string localPath = 2;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getLocalPathBytes() {
java.lang.Object ref = localPath_;
if (ref instanceof String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
localPath_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* required string localPath = 2;
*/
public Builder setLocalPath(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
localPath_ = value;
onChanged();
return this;
}
/**
* required string localPath = 2;
*/
public Builder clearLocalPath() {
bitField0_ = (bitField0_ & ~0x00000002);
localPath_ = getDefaultInstance().getLocalPath();
onChanged();
return this;
}
/**
* required string localPath = 2;
*/
public Builder setLocalPathBytes(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
localPath_ = value;
onChanged();
return this;
}
private java.lang.Object localMetaPath_ = "";
/**
* required string localMetaPath = 3;
*/
public boolean hasLocalMetaPath() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* required string localMetaPath = 3;
*/
public java.lang.String getLocalMetaPath() {
java.lang.Object ref = localMetaPath_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
localMetaPath_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string localMetaPath = 3;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getLocalMetaPathBytes() {
java.lang.Object ref = localMetaPath_;
if (ref instanceof String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
localMetaPath_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* required string localMetaPath = 3;
*/
public Builder setLocalMetaPath(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
localMetaPath_ = value;
onChanged();
return this;
}
/**
* required string localMetaPath = 3;
*/
public Builder clearLocalMetaPath() {
bitField0_ = (bitField0_ & ~0x00000004);
localMetaPath_ = getDefaultInstance().getLocalMetaPath();
onChanged();
return this;
}
/**
* required string localMetaPath = 3;
*/
public Builder setLocalMetaPathBytes(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
localMetaPath_ = value;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetBlockLocalPathInfoResponseProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetBlockLocalPathInfoResponseProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetBlockLocalPathInfoResponseProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<GetBlockLocalPathInfoResponseProto>() {
@java.lang.Override
public GetBlockLocalPathInfoResponseProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new GetBlockLocalPathInfoResponseProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetBlockLocalPathInfoResponseProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetBlockLocalPathInfoResponseProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
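// Illustrative usage sketch (hand-written, not protoc-generated): fills in the three
// required fields of GetBlockLocalPathInfoResponseProto. The path strings below are
// hypothetical placeholders, not values used anywhere in Hadoop; real values come from
// the datanode's local block storage layout. As the message comment notes, this
// response is deprecated in favor of file descriptor passing.
private static GetBlockLocalPathInfoResponseProto exampleLocalPathInfoResponse(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block) {
return GetBlockLocalPathInfoResponseProto.newBuilder()
.setBlock(block)                                        // required: the block being described
.setLocalPath("/data/current/blk_1073741825")           // hypothetical block data file path
.setLocalMetaPath("/data/current/blk_1073741825.meta")  // hypothetical block metadata file path
.build();                                               // build() throws if any required field is unset
}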
public interface ShutdownDatanodeRequestProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.ShutdownDatanodeRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required bool forUpgrade = 1;
*/
boolean hasForUpgrade();
/**
* required bool forUpgrade = 1;
*/
boolean getForUpgrade();
}
/**
*
**
* forUpgrade - if true, clients are advised to wait for the restart, and a
* quick restart for upgrade is performed. Otherwise, the datanode does
* a regular shutdown.
*
*
* Protobuf type {@code hadoop.hdfs.ShutdownDatanodeRequestProto}
*/
public static final class ShutdownDatanodeRequestProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.ShutdownDatanodeRequestProto)
ShutdownDatanodeRequestProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use ShutdownDatanodeRequestProto.newBuilder() to construct.
private ShutdownDatanodeRequestProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private ShutdownDatanodeRequestProto() {
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ShutdownDatanodeRequestProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
bitField0_ |= 0x00000001;
forUpgrade_ = input.readBool();
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.Builder.class);
}
private int bitField0_;
public static final int FORUPGRADE_FIELD_NUMBER = 1;
private boolean forUpgrade_;
/**
* required bool forUpgrade = 1;
*/
public boolean hasForUpgrade() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required bool forUpgrade = 1;
*/
public boolean getForUpgrade() {
return forUpgrade_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasForUpgrade()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeBool(1, forUpgrade_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBoolSize(1, forUpgrade_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto) obj;
if (hasForUpgrade() != other.hasForUpgrade()) return false;
if (hasForUpgrade()) {
if (getForUpgrade()
!= other.getForUpgrade()) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasForUpgrade()) {
hash = (37 * hash) + FORUPGRADE_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
getForUpgrade());
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
**
* forUpgrade - if true, clients are advised to wait for the restart, and a
* quick restart for upgrade is performed. Otherwise, the datanode does
* a regular shutdown.
*
*
* Protobuf type {@code hadoop.hdfs.ShutdownDatanodeRequestProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.ShutdownDatanodeRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
forUpgrade_ = false;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.forUpgrade_ = forUpgrade_;
to_bitField0_ |= 0x00000001;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.getDefaultInstance()) return this;
if (other.hasForUpgrade()) {
setForUpgrade(other.getForUpgrade());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasForUpgrade()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private boolean forUpgrade_ ;
/**
* required bool forUpgrade = 1;
*/
public boolean hasForUpgrade() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required bool forUpgrade = 1;
*/
public boolean getForUpgrade() {
return forUpgrade_;
}
/**
* required bool forUpgrade = 1;
*/
public Builder setForUpgrade(boolean value) {
bitField0_ |= 0x00000001;
forUpgrade_ = value;
onChanged();
return this;
}
/**
* required bool forUpgrade = 1;
*/
public Builder clearForUpgrade() {
bitField0_ = (bitField0_ & ~0x00000001);
forUpgrade_ = false;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ShutdownDatanodeRequestProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ShutdownDatanodeRequestProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<ShutdownDatanodeRequestProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<ShutdownDatanodeRequestProto>() {
@java.lang.Override
public ShutdownDatanodeRequestProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new ShutdownDatanodeRequestProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<ShutdownDatanodeRequestProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<ShutdownDatanodeRequestProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
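// Illustrative usage sketch (hand-written, not protoc-generated): builds a
// ShutdownDatanodeRequestProto. Per the message comment above, forUpgrade=true asks for
// the quick-restart (upgrade) shutdown path, while forUpgrade=false requests a regular
// shutdown. isInitialized() stays false until the required forUpgrade field has been set.
private static ShutdownDatanodeRequestProto exampleShutdownRequest(boolean forUpgrade) {
return ShutdownDatanodeRequestProto.newBuilder()
.setForUpgrade(forUpgrade)  // the only field of this message, marked required
.build();
}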
public interface ShutdownDatanodeResponseProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.ShutdownDatanodeResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.ShutdownDatanodeResponseProto}
*/
public static final class ShutdownDatanodeResponseProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.ShutdownDatanodeResponseProto)
ShutdownDatanodeResponseProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use ShutdownDatanodeResponseProto.newBuilder() to construct.
private ShutdownDatanodeResponseProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private ShutdownDatanodeResponseProto() {
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ShutdownDatanodeResponseProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.org.apache.hadoop.shaded.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.Builder.class);
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.org.apache.hadoop.shaded.io.IOException {
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto) obj;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(java.org.apache.hadoop.shaded.io.InputStream input)
throws java.org.apache.hadoop.shaded.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(
java.org.apache.hadoop.shaded.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.org.apache.hadoop.shaded.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseDelimitedFrom(java.org.apache.hadoop.shaded.io.InputStream input)
throws java.org.apache.hadoop.shaded.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseDelimitedFrom(
java.org.apache.hadoop.shaded.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.org.apache.hadoop.shaded.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.org.apache.hadoop.shaded.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.org.apache.hadoop.shaded.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.ShutdownDatanodeResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.ShutdownDatanodeResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto(this);
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.org.apache.hadoop.shaded.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.ShutdownDatanodeResponseProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ShutdownDatanodeResponseProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<ShutdownDatanodeResponseProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<ShutdownDatanodeResponseProto>() {
@java.lang.Override
public ShutdownDatanodeResponseProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new ShutdownDatanodeResponseProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<ShutdownDatanodeResponseProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<ShutdownDatanodeResponseProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
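/*
 * Illustrative sketch (assumption, not generated output): ShutdownDatanodeResponseProto
 * declares no fields, so the default instance serializes to zero bytes; any unknown
 * fields seen on the wire are simply preserved by the parse loop above.
 *
 *   ShutdownDatanodeResponseProto resp = ShutdownDatanodeResponseProto.getDefaultInstance();
 *   byte[] wire = resp.toByteArray();   // empty payload for the empty message
 *   ShutdownDatanodeResponseProto parsed = ShutdownDatanodeResponseProto.parseFrom(wire);
 *   assert parsed.equals(resp);
 */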
public interface EvictWritersRequestProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.EvictWritersRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
}
/**
* Tell datanode to evict active clients that are writing
*
* Protobuf type {@code hadoop.hdfs.EvictWritersRequestProto}
*/
public static final class EvictWritersRequestProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.EvictWritersRequestProto)
EvictWritersRequestProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use EvictWritersRequestProto.newBuilder() to construct.
private EvictWritersRequestProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private EvictWritersRequestProto() {
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private EvictWritersRequestProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.org.apache.hadoop.shaded.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_EvictWritersRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_EvictWritersRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto.Builder.class);
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.org.apache.hadoop.shaded.io.IOException {
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto) obj;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parseFrom(java.org.apache.hadoop.shaded.io.InputStream input)
throws java.org.apache.hadoop.shaded.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parseFrom(
java.org.apache.hadoop.shaded.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.org.apache.hadoop.shaded.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parseDelimitedFrom(java.org.apache.hadoop.shaded.io.InputStream input)
throws java.org.apache.hadoop.shaded.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parseDelimitedFrom(
java.org.apache.hadoop.shaded.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.org.apache.hadoop.shaded.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.org.apache.hadoop.shaded.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.org.apache.hadoop.shaded.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Tell datanode to evict active clients that are writing
*
* Protobuf type {@code hadoop.hdfs.EvictWritersRequestProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.EvictWritersRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_EvictWritersRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_EvictWritersRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_EvictWritersRequestProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto(this);
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.org.apache.hadoop.shaded.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.EvictWritersRequestProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.EvictWritersRequestProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<EvictWritersRequestProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<EvictWritersRequestProto>() {
@java.lang.Override
public EvictWritersRequestProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new EvictWritersRequestProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<EvictWritersRequestProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<EvictWritersRequestProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
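/*
 * Illustrative sketch (assumption): EvictWritersRequestProto is an empty marker
 * message, so the request a client sends to ask the datanode to evict active
 * writers is just the default instance; newBuilder().build() produces an
 * equivalent object.
 *
 *   EvictWritersRequestProto evict = EvictWritersRequestProto.newBuilder().build();
 *   assert evict.equals(EvictWritersRequestProto.getDefaultInstance());
 */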
public interface EvictWritersResponseProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.EvictWritersResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.EvictWritersResponseProto}
*/
public static final class EvictWritersResponseProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.EvictWritersResponseProto)
EvictWritersResponseProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use EvictWritersResponseProto.newBuilder() to construct.
private EvictWritersResponseProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private EvictWritersResponseProto() {
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private EvictWritersResponseProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.org.apache.hadoop.shaded.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_EvictWritersResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_EvictWritersResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.Builder.class);
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.org.apache.hadoop.shaded.io.IOException {
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto) obj;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parseFrom(java.org.apache.hadoop.shaded.io.InputStream input)
throws java.org.apache.hadoop.shaded.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parseFrom(
java.org.apache.hadoop.shaded.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.org.apache.hadoop.shaded.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parseDelimitedFrom(java.org.apache.hadoop.shaded.io.InputStream input)
throws java.org.apache.hadoop.shaded.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parseDelimitedFrom(
java.org.apache.hadoop.shaded.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.org.apache.hadoop.shaded.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.org.apache.hadoop.shaded.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.org.apache.hadoop.shaded.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.EvictWritersResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.EvictWritersResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_EvictWritersResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_EvictWritersResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_EvictWritersResponseProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto(this);
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.org.apache.hadoop.shaded.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.EvictWritersResponseProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.EvictWritersResponseProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<EvictWritersResponseProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<EvictWritersResponseProto>() {
@java.lang.Override
public EvictWritersResponseProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new EvictWritersResponseProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<EvictWritersResponseProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<EvictWritersResponseProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
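// Hedged usage sketch (illustrative, not part of the protoc-generated source): EvictWritersResponseProto
// carries no fields, so callers typically just round-trip the default instance; the helper name below is
// an assumption for demonstration only.
private static EvictWritersResponseProto exampleRoundTripEvictWritersResponse()
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
// Serialize the empty response and parse it back through the generated parseFrom entry point.
byte[] wire = EvictWritersResponseProto.getDefaultInstance().toByteArray();
return EvictWritersResponseProto.parseFrom(wire);
}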
public interface GetDatanodeInfoRequestProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.GetDatanodeInfoRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
}
/**
* <pre>
**
* Ping datanode for liveness and quick info
* </pre>
*
* Protobuf type {@code hadoop.hdfs.GetDatanodeInfoRequestProto}
*/
public static final class GetDatanodeInfoRequestProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.GetDatanodeInfoRequestProto)
GetDatanodeInfoRequestProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use GetDatanodeInfoRequestProto.newBuilder() to construct.
private GetDatanodeInfoRequestProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private GetDatanodeInfoRequestProto() {
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetDatanodeInfoRequestProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.Builder.class);
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto) obj;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* <pre>
**
* Ping datanode for liveness and quick info
* </pre>
*
* Protobuf type {@code hadoop.hdfs.GetDatanodeInfoRequestProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.GetDatanodeInfoRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto(this);
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetDatanodeInfoRequestProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetDatanodeInfoRequestProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetDatanodeInfoRequestProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<GetDatanodeInfoRequestProto>() {
@java.lang.Override
public GetDatanodeInfoRequestProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new GetDatanodeInfoRequestProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetDatanodeInfoRequestProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetDatanodeInfoRequestProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
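// Hedged usage sketch (illustrative, not part of the protoc-generated source): GetDatanodeInfoRequestProto
// is the empty "ping datanode for liveness and quick info" request, so building it needs no field setters;
// the helper name below is an assumption for demonstration only.
private static GetDatanodeInfoRequestProto exampleBuildGetDatanodeInfoRequest()
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
GetDatanodeInfoRequestProto request = GetDatanodeInfoRequestProto.newBuilder().build();
// Round-trip through the wire format to show the generated parseFrom entry point.
return GetDatanodeInfoRequestProto.parseFrom(request.toByteArray());
}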
public interface GetDatanodeInfoResponseProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.GetDatanodeInfoResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
boolean hasLocalInfo();
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto getLocalInfo();
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder getLocalInfoOrBuilder();
}
/**
* Protobuf type {@code hadoop.hdfs.GetDatanodeInfoResponseProto}
*/
public static final class GetDatanodeInfoResponseProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.GetDatanodeInfoResponseProto)
GetDatanodeInfoResponseProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use GetDatanodeInfoResponseProto.newBuilder() to construct.
private GetDatanodeInfoResponseProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private GetDatanodeInfoResponseProto() {
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetDatanodeInfoResponseProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) != 0)) {
subBuilder = localInfo_.toBuilder();
}
localInfo_ = input.readMessage(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(localInfo_);
localInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.Builder.class);
}
private int bitField0_;
public static final int LOCALINFO_FIELD_NUMBER = 1;
private org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto localInfo_;
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public boolean hasLocalInfo() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto getLocalInfo() {
return localInfo_ == null ? org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance() : localInfo_;
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder getLocalInfoOrBuilder() {
return localInfo_ == null ? org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance() : localInfo_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasLocalInfo()) {
memoizedIsInitialized = 0;
return false;
}
if (!getLocalInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeMessage(1, getLocalInfo());
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(1, getLocalInfo());
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto) obj;
if (hasLocalInfo() != other.hasLocalInfo()) return false;
if (hasLocalInfo()) {
if (!getLocalInfo()
.equals(other.getLocalInfo())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasLocalInfo()) {
hash = (37 * hash) + LOCALINFO_FIELD_NUMBER;
hash = (53 * hash) + getLocalInfo().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.GetDatanodeInfoResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.GetDatanodeInfoResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getLocalInfoFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
if (localInfoBuilder_ == null) {
localInfo_ = null;
} else {
localInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
if (localInfoBuilder_ == null) {
result.localInfo_ = localInfo_;
} else {
result.localInfo_ = localInfoBuilder_.build();
}
to_bitField0_ |= 0x00000001;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.getDefaultInstance()) return this;
if (other.hasLocalInfo()) {
mergeLocalInfo(other.getLocalInfo());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasLocalInfo()) {
return false;
}
if (!getLocalInfo().isInitialized()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto localInfo_;
private org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder> localInfoBuilder_;
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public boolean hasLocalInfo() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto getLocalInfo() {
if (localInfoBuilder_ == null) {
return localInfo_ == null ? org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance() : localInfo_;
} else {
return localInfoBuilder_.getMessage();
}
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public Builder setLocalInfo(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto value) {
if (localInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
localInfo_ = value;
onChanged();
} else {
localInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public Builder setLocalInfo(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder builderForValue) {
if (localInfoBuilder_ == null) {
localInfo_ = builderForValue.build();
onChanged();
} else {
localInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public Builder mergeLocalInfo(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto value) {
if (localInfoBuilder_ == null) {
if (((bitField0_ & 0x00000001) != 0) &&
localInfo_ != null &&
localInfo_ != org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance()) {
localInfo_ =
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.newBuilder(localInfo_).mergeFrom(value).buildPartial();
} else {
localInfo_ = value;
}
onChanged();
} else {
localInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public Builder clearLocalInfo() {
if (localInfoBuilder_ == null) {
localInfo_ = null;
onChanged();
} else {
localInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder getLocalInfoBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getLocalInfoFieldBuilder().getBuilder();
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder getLocalInfoOrBuilder() {
if (localInfoBuilder_ != null) {
return localInfoBuilder_.getMessageOrBuilder();
} else {
return localInfo_ == null ?
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance() : localInfo_;
}
}
/**
* required .hadoop.hdfs.DatanodeLocalInfoProto localInfo = 1;
*/
private org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder>
getLocalInfoFieldBuilder() {
if (localInfoBuilder_ == null) {
localInfoBuilder_ = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder>(
getLocalInfo(),
getParentForChildren(),
isClean());
localInfo_ = null;
}
return localInfoBuilder_;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetDatanodeInfoResponseProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetDatanodeInfoResponseProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetDatanodeInfoResponseProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<GetDatanodeInfoResponseProto>() {
@java.lang.Override
public GetDatanodeInfoResponseProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new GetDatanodeInfoResponseProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetDatanodeInfoResponseProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetDatanodeInfoResponseProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
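// Hedged usage sketch (illustrative, not part of the protoc-generated source): GetDatanodeInfoResponseProto
// has a single required localInfo field, so build() throws if it is left unset; the caller is assumed to
// already hold a DatanodeLocalInfoProto, and the helper name below is an assumption for demonstration only.
private static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto exampleWrapLocalInfo(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto localInfo) {
GetDatanodeInfoResponseProto response = GetDatanodeInfoResponseProto.newBuilder()
.setLocalInfo(localInfo) // required field; omitting it would make build() throw
.build();
// hasLocalInfo() reflects the presence bit set by setLocalInfo above.
return response.hasLocalInfo() ? response.getLocalInfo() : null;
}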
public interface GetVolumeReportRequestProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.GetVolumeReportRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.GetVolumeReportRequestProto}
*/
public static final class GetVolumeReportRequestProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.GetVolumeReportRequestProto)
GetVolumeReportRequestProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use GetVolumeReportRequestProto.newBuilder() to construct.
private GetVolumeReportRequestProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private GetVolumeReportRequestProto() {
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetVolumeReportRequestProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetVolumeReportRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetVolumeReportRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto.Builder.class);
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto) obj;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.GetVolumeReportRequestProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.GetVolumeReportRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetVolumeReportRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetVolumeReportRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetVolumeReportRequestProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto(this);
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetVolumeReportRequestProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetVolumeReportRequestProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetVolumeReportRequestProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<GetVolumeReportRequestProto>() {
@java.lang.Override
public GetVolumeReportRequestProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new GetVolumeReportRequestProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetVolumeReportRequestProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetVolumeReportRequestProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
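// Usage sketch (editorial addition, not generated by protoc): GetVolumeReportRequestProto
// declares no fields, so a volume-report request is just an empty message that round-trips
// through the parse/serialize entry points defined above.
//
//   GetVolumeReportRequestProto req = GetVolumeReportRequestProto.newBuilder().build();
//   byte[] wire = req.toByteArray();
//   GetVolumeReportRequestProto parsed = GetVolumeReportRequestProto.parseFrom(wire);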
public interface GetVolumeReportResponseProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.GetVolumeReportResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
java.util.List<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto>
getVolumeInfoList();
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto getVolumeInfo(int index);
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
int getVolumeInfoCount();
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
java.util.List<? extends org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProtoOrBuilder>
getVolumeInfoOrBuilderList();
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProtoOrBuilder getVolumeInfoOrBuilder(
int index);
}
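// Reader-side sketch (editorial addition): the OrBuilder interface above exposes the repeated
// volumeInfo field through count/index accessors, so callers can walk a report without caring
// whether they hold a built message or a builder. `report` below stands for any
// GetVolumeReportResponseProtoOrBuilder instance (an assumed variable, used only for illustration).
//
//   for (int i = 0; i < report.getVolumeInfoCount(); i++) {
//     HdfsProtos.DatanodeVolumeInfoProto vol = report.getVolumeInfo(i);
//     // inspect vol here
//   }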
/**
* Protobuf type {@code hadoop.hdfs.GetVolumeReportResponseProto}
*/
public static final class GetVolumeReportResponseProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.GetVolumeReportResponseProto)
GetVolumeReportResponseProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use GetVolumeReportResponseProto.newBuilder() to construct.
private GetVolumeReportResponseProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private GetVolumeReportResponseProto() {
volumeInfo_ = java.util.Collections.emptyList();
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetVolumeReportResponseProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
if (!((mutable_bitField0_ & 0x00000001) != 0)) {
volumeInfo_ = new java.util.ArrayList<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto>();
mutable_bitField0_ |= 0x00000001;
}
volumeInfo_.add(
input.readMessage(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.PARSER, extensionRegistry));
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) != 0)) {
volumeInfo_ = java.util.Collections.unmodifiableList(volumeInfo_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetVolumeReportResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetVolumeReportResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.Builder.class);
}
public static final int VOLUMEINFO_FIELD_NUMBER = 1;
private java.util.List<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto> volumeInfo_;
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public java.util.List<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto> getVolumeInfoList() {
return volumeInfo_;
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public java.util.List<? extends org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProtoOrBuilder>
getVolumeInfoOrBuilderList() {
return volumeInfo_;
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public int getVolumeInfoCount() {
return volumeInfo_.size();
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto getVolumeInfo(int index) {
return volumeInfo_.get(index);
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProtoOrBuilder getVolumeInfoOrBuilder(
int index) {
return volumeInfo_.get(index);
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
for (int i = 0; i < getVolumeInfoCount(); i++) {
if (!getVolumeInfo(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
for (int i = 0; i < volumeInfo_.size(); i++) {
output.writeMessage(1, volumeInfo_.get(i));
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < volumeInfo_.size(); i++) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeMessageSize(1, volumeInfo_.get(i));
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto) obj;
if (!getVolumeInfoList()
.equals(other.getVolumeInfoList())) return false;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (getVolumeInfoCount() > 0) {
hash = (37 * hash) + VOLUMEINFO_FIELD_NUMBER;
hash = (53 * hash) + getVolumeInfoList().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.GetVolumeReportResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.GetVolumeReportResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetVolumeReportResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetVolumeReportResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getVolumeInfoFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
if (volumeInfoBuilder_ == null) {
volumeInfo_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
} else {
volumeInfoBuilder_.clear();
}
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetVolumeReportResponseProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto(this);
int from_bitField0_ = bitField0_;
if (volumeInfoBuilder_ == null) {
if (((bitField0_ & 0x00000001) != 0)) {
volumeInfo_ = java.util.Collections.unmodifiableList(volumeInfo_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.volumeInfo_ = volumeInfo_;
} else {
result.volumeInfo_ = volumeInfoBuilder_.build();
}
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.getDefaultInstance()) return this;
if (volumeInfoBuilder_ == null) {
if (!other.volumeInfo_.isEmpty()) {
if (volumeInfo_.isEmpty()) {
volumeInfo_ = other.volumeInfo_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureVolumeInfoIsMutable();
volumeInfo_.addAll(other.volumeInfo_);
}
onChanged();
}
} else {
if (!other.volumeInfo_.isEmpty()) {
if (volumeInfoBuilder_.isEmpty()) {
volumeInfoBuilder_.dispose();
volumeInfoBuilder_ = null;
volumeInfo_ = other.volumeInfo_;
bitField0_ = (bitField0_ & ~0x00000001);
volumeInfoBuilder_ =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getVolumeInfoFieldBuilder() : null;
} else {
volumeInfoBuilder_.addAllMessages(other.volumeInfo_);
}
}
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
for (int i = 0; i < getVolumeInfoCount(); i++) {
if (!getVolumeInfo(i).isInitialized()) {
return false;
}
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private java.util.List<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto> volumeInfo_ =
java.util.Collections.emptyList();
private void ensureVolumeInfoIsMutable() {
if (!((bitField0_ & 0x00000001) != 0)) {
volumeInfo_ = new java.util.ArrayList<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto>(volumeInfo_);
bitField0_ |= 0x00000001;
}
}
private org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProtoOrBuilder> volumeInfoBuilder_;
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public java.util.List<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto> getVolumeInfoList() {
if (volumeInfoBuilder_ == null) {
return java.util.Collections.unmodifiableList(volumeInfo_);
} else {
return volumeInfoBuilder_.getMessageList();
}
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public int getVolumeInfoCount() {
if (volumeInfoBuilder_ == null) {
return volumeInfo_.size();
} else {
return volumeInfoBuilder_.getCount();
}
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto getVolumeInfo(int index) {
if (volumeInfoBuilder_ == null) {
return volumeInfo_.get(index);
} else {
return volumeInfoBuilder_.getMessage(index);
}
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public Builder setVolumeInfo(
int index, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto value) {
if (volumeInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureVolumeInfoIsMutable();
volumeInfo_.set(index, value);
onChanged();
} else {
volumeInfoBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public Builder setVolumeInfo(
int index, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder builderForValue) {
if (volumeInfoBuilder_ == null) {
ensureVolumeInfoIsMutable();
volumeInfo_.set(index, builderForValue.build());
onChanged();
} else {
volumeInfoBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public Builder addVolumeInfo(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto value) {
if (volumeInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureVolumeInfoIsMutable();
volumeInfo_.add(value);
onChanged();
} else {
volumeInfoBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public Builder addVolumeInfo(
int index, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto value) {
if (volumeInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureVolumeInfoIsMutable();
volumeInfo_.add(index, value);
onChanged();
} else {
volumeInfoBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public Builder addVolumeInfo(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder builderForValue) {
if (volumeInfoBuilder_ == null) {
ensureVolumeInfoIsMutable();
volumeInfo_.add(builderForValue.build());
onChanged();
} else {
volumeInfoBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public Builder addVolumeInfo(
int index, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder builderForValue) {
if (volumeInfoBuilder_ == null) {
ensureVolumeInfoIsMutable();
volumeInfo_.add(index, builderForValue.build());
onChanged();
} else {
volumeInfoBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public Builder addAllVolumeInfo(
java.lang.Iterable<? extends org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto> values) {
if (volumeInfoBuilder_ == null) {
ensureVolumeInfoIsMutable();
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
values, volumeInfo_);
onChanged();
} else {
volumeInfoBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public Builder clearVolumeInfo() {
if (volumeInfoBuilder_ == null) {
volumeInfo_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
} else {
volumeInfoBuilder_.clear();
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public Builder removeVolumeInfo(int index) {
if (volumeInfoBuilder_ == null) {
ensureVolumeInfoIsMutable();
volumeInfo_.remove(index);
onChanged();
} else {
volumeInfoBuilder_.remove(index);
}
return this;
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder getVolumeInfoBuilder(
int index) {
return getVolumeInfoFieldBuilder().getBuilder(index);
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProtoOrBuilder getVolumeInfoOrBuilder(
int index) {
if (volumeInfoBuilder_ == null) {
return volumeInfo_.get(index); } else {
return volumeInfoBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public java.util.List<? extends org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProtoOrBuilder>
getVolumeInfoOrBuilderList() {
if (volumeInfoBuilder_ != null) {
return volumeInfoBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(volumeInfo_);
}
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder addVolumeInfoBuilder() {
return getVolumeInfoFieldBuilder().addBuilder(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder addVolumeInfoBuilder(
int index) {
return getVolumeInfoFieldBuilder().addBuilder(
index, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.getDefaultInstance());
}
/**
* repeated .hadoop.hdfs.DatanodeVolumeInfoProto volumeInfo = 1;
*/
public java.util.List<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder>
getVolumeInfoBuilderList() {
return getVolumeInfoFieldBuilder().getBuilderList();
}
private org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProtoOrBuilder>
getVolumeInfoFieldBuilder() {
if (volumeInfoBuilder_ == null) {
volumeInfoBuilder_ = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProtoOrBuilder>(
volumeInfo_,
((bitField0_ & 0x00000001) != 0),
getParentForChildren(),
isClean());
volumeInfo_ = null;
}
return volumeInfoBuilder_;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetVolumeReportResponseProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetVolumeReportResponseProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetVolumeReportResponseProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<GetVolumeReportResponseProto>() {
@java.lang.Override
public GetVolumeReportResponseProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new GetVolumeReportResponseProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetVolumeReportResponseProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetVolumeReportResponseProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
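// Builder-side sketch (editorial addition): a response is assembled by appending
// DatanodeVolumeInfoProto entries to the repeated volumeInfo field; the delimited variants
// above pair each message with a length prefix for streaming. `volumeInfoProto`, `out`, and
// `in` are assumed inputs, not names defined in this file.
//
//   GetVolumeReportResponseProto resp = GetVolumeReportResponseProto.newBuilder()
//       .addVolumeInfo(volumeInfoProto)
//       .build();
//   resp.writeDelimitedTo(out);                              // out: a java.io.OutputStream
//   GetVolumeReportResponseProto echoed =
//       GetVolumeReportResponseProto.parseDelimitedFrom(in); // in: a java.io.InputStream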
public interface TriggerBlockReportRequestProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.TriggerBlockReportRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required bool incremental = 1;
*/
boolean hasIncremental();
/**
* required bool incremental = 1;
*/
boolean getIncremental();
/**
* optional string nnAddress = 2;
*/
boolean hasNnAddress();
/**
* optional string nnAddress = 2;
*/
java.lang.String getNnAddress();
/**
* optional string nnAddress = 2;
*/
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getNnAddressBytes();
}
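// Field-presence sketch (editorial addition): incremental is a required proto2 field and
// nnAddress is optional, so a build that never sets incremental fails isInitialized(), and
// readers should gate getNnAddress() on hasNnAddress(). The setters shown are the standard
// protoc-generated builder methods; the address value is purely illustrative.
//
//   TriggerBlockReportRequestProto trigger = TriggerBlockReportRequestProto.newBuilder()
//       .setIncremental(true)
//       .setNnAddress("nn-host:8020")
//       .build();
//   String nn = trigger.hasNnAddress() ? trigger.getNnAddress() : "";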
/**
* Protobuf type {@code hadoop.hdfs.TriggerBlockReportRequestProto}
*/
public static final class TriggerBlockReportRequestProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.TriggerBlockReportRequestProto)
TriggerBlockReportRequestProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use TriggerBlockReportRequestProto.newBuilder() to construct.
private TriggerBlockReportRequestProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private TriggerBlockReportRequestProto() {
nnAddress_ = "";
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private TriggerBlockReportRequestProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
bitField0_ |= 0x00000001;
incremental_ = input.readBool();
break;
}
case 18: {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000002;
nnAddress_ = bs;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.Builder.class);
}
private int bitField0_;
public static final int INCREMENTAL_FIELD_NUMBER = 1;
private boolean incremental_;
/**
* required bool incremental = 1;
*/
public boolean hasIncremental() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required bool incremental = 1;
*/
public boolean getIncremental() {
return incremental_;
}
public static final int NNADDRESS_FIELD_NUMBER = 2;
private volatile java.lang.Object nnAddress_;
/**
* optional string nnAddress = 2;
*/
public boolean hasNnAddress() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional string nnAddress = 2;
*/
public java.lang.String getNnAddress() {
java.lang.Object ref = nnAddress_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
nnAddress_ = s;
}
return s;
}
}
/**
* optional string nnAddress = 2;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getNnAddressBytes() {
java.lang.Object ref = nnAddress_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
nnAddress_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasIncremental()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeBool(1, incremental_);
}
if (((bitField0_ & 0x00000002) != 0)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, nnAddress_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBoolSize(1, incremental_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, nnAddress_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto) obj;
if (hasIncremental() != other.hasIncremental()) return false;
if (hasIncremental()) {
if (getIncremental()
!= other.getIncremental()) return false;
}
if (hasNnAddress() != other.hasNnAddress()) return false;
if (hasNnAddress()) {
if (!getNnAddress()
.equals(other.getNnAddress())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasIncremental()) {
hash = (37 * hash) + INCREMENTAL_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
getIncremental());
}
if (hasNnAddress()) {
hash = (37 * hash) + NNADDRESS_FIELD_NUMBER;
hash = (53 * hash) + getNnAddress().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.TriggerBlockReportRequestProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.TriggerBlockReportRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
incremental_ = false;
bitField0_ = (bitField0_ & ~0x00000001);
nnAddress_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.incremental_ = incremental_;
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
to_bitField0_ |= 0x00000002;
}
result.nnAddress_ = nnAddress_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.getDefaultInstance()) return this;
if (other.hasIncremental()) {
setIncremental(other.getIncremental());
}
if (other.hasNnAddress()) {
bitField0_ |= 0x00000002;
nnAddress_ = other.nnAddress_;
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasIncremental()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private boolean incremental_ ;
/**
* required bool incremental = 1;
*/
public boolean hasIncremental() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required bool incremental = 1;
*/
public boolean getIncremental() {
return incremental_;
}
/**
* required bool incremental = 1;
*/
public Builder setIncremental(boolean value) {
bitField0_ |= 0x00000001;
incremental_ = value;
onChanged();
return this;
}
/**
* required bool incremental = 1;
*/
public Builder clearIncremental() {
bitField0_ = (bitField0_ & ~0x00000001);
incremental_ = false;
onChanged();
return this;
}
private java.lang.Object nnAddress_ = "";
/**
* optional string nnAddress = 2;
*/
public boolean hasNnAddress() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional string nnAddress = 2;
*/
public java.lang.String getNnAddress() {
java.lang.Object ref = nnAddress_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
nnAddress_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string nnAddress = 2;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getNnAddressBytes() {
java.lang.Object ref = nnAddress_;
if (ref instanceof String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
nnAddress_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* optional string nnAddress = 2;
*/
public Builder setNnAddress(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
nnAddress_ = value;
onChanged();
return this;
}
/**
* optional string nnAddress = 2;
*/
public Builder clearNnAddress() {
bitField0_ = (bitField0_ & ~0x00000002);
nnAddress_ = getDefaultInstance().getNnAddress();
onChanged();
return this;
}
/**
* optional string nnAddress = 2;
*/
public Builder setNnAddressBytes(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
nnAddress_ = value;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.TriggerBlockReportRequestProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.TriggerBlockReportRequestProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<TriggerBlockReportRequestProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<TriggerBlockReportRequestProto>() {
@java.lang.Override
public TriggerBlockReportRequestProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new TriggerBlockReportRequestProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<TriggerBlockReportRequestProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<TriggerBlockReportRequestProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
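// NOTE: the following helper is an illustrative, hand-written sketch and is NOT part of the
// protoc-generated output. It shows one way a caller might assemble a
// TriggerBlockReportRequestProto before handing it to the ClientDatanodeProtocol RPC layer;
// the nnAddress value is a hypothetical placeholder.
private static TriggerBlockReportRequestProto exampleTriggerBlockReportRequest() {
  return TriggerBlockReportRequestProto.newBuilder()
      .setIncremental(false)          // required field: false asks for a full (non-incremental) block report
      .setNnAddress("nn-host:8020")   // optional field: target NameNode address (placeholder value)
      .build();                       // build() fails if the required 'incremental' field were left unset
}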
public interface TriggerBlockReportResponseProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.TriggerBlockReportResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.TriggerBlockReportResponseProto}
*/
public static final class TriggerBlockReportResponseProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.TriggerBlockReportResponseProto)
TriggerBlockReportResponseProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use TriggerBlockReportResponseProto.newBuilder() to construct.
private TriggerBlockReportResponseProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private TriggerBlockReportResponseProto() {
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private TriggerBlockReportResponseProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.Builder.class);
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto) obj;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.TriggerBlockReportResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.TriggerBlockReportResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto(this);
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.TriggerBlockReportResponseProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.TriggerBlockReportResponseProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<TriggerBlockReportResponseProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<TriggerBlockReportResponseProto>() {
@java.lang.Override
public TriggerBlockReportResponseProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new TriggerBlockReportResponseProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<TriggerBlockReportResponseProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<TriggerBlockReportResponseProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
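// NOTE: illustrative, hand-written sketch; NOT part of the protoc-generated output.
// TriggerBlockReportResponseProto carries no fields, so a round trip through its wire
// form (as an RPC layer would perform) simply yields an empty message again.
private static TriggerBlockReportResponseProto exampleTriggerBlockReportResponseRoundTrip()
    throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
  byte[] wire = TriggerBlockReportResponseProto.getDefaultInstance().toByteArray();
  return TriggerBlockReportResponseProto.parseFrom(wire);   // parses back to an (empty) response
}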
public interface GetBalancerBandwidthRequestProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.GetBalancerBandwidthRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.hdfs.GetBalancerBandwidthRequestProto}
*/
public static final class GetBalancerBandwidthRequestProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.GetBalancerBandwidthRequestProto)
GetBalancerBandwidthRequestProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use GetBalancerBandwidthRequestProto.newBuilder() to construct.
private GetBalancerBandwidthRequestProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private GetBalancerBandwidthRequestProto() {
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetBalancerBandwidthRequestProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBalancerBandwidthRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBalancerBandwidthRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto.Builder.class);
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto) obj;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.hdfs.GetBalancerBandwidthRequestProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.GetBalancerBandwidthRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBalancerBandwidthRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBalancerBandwidthRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBalancerBandwidthRequestProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto(this);
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetBalancerBandwidthRequestProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetBalancerBandwidthRequestProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetBalancerBandwidthRequestProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<GetBalancerBandwidthRequestProto>() {
@java.lang.Override
public GetBalancerBandwidthRequestProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new GetBalancerBandwidthRequestProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetBalancerBandwidthRequestProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetBalancerBandwidthRequestProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
public interface GetBalancerBandwidthResponseProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.GetBalancerBandwidthResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required uint64 bandwidth = 1;
*/
boolean hasBandwidth();
/**
* required uint64 bandwidth = 1;
*/
long getBandwidth();
}
/**
*
**
* bandwidth - balancer bandwidth value of the datanode.
*
*
* Protobuf type {@code hadoop.hdfs.GetBalancerBandwidthResponseProto}
*/
public static final class GetBalancerBandwidthResponseProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.GetBalancerBandwidthResponseProto)
GetBalancerBandwidthResponseProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use GetBalancerBandwidthResponseProto.newBuilder() to construct.
private GetBalancerBandwidthResponseProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private GetBalancerBandwidthResponseProto() {
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetBalancerBandwidthResponseProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
bitField0_ |= 0x00000001;
bandwidth_ = input.readUInt64();
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBalancerBandwidthResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBalancerBandwidthResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.Builder.class);
}
private int bitField0_;
public static final int BANDWIDTH_FIELD_NUMBER = 1;
private long bandwidth_;
/**
* required uint64 bandwidth = 1;
*/
public boolean hasBandwidth() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required uint64 bandwidth = 1;
*/
public long getBandwidth() {
return bandwidth_;
}
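// NOTE: illustrative, hand-written sketch; NOT part of the protoc-generated output.
// Decodes a serialized GetBalancerBandwidthResponseProto and reads the required
// 'bandwidth' field, e.g. on the client side of a balancer-bandwidth query.
private static long exampleReadBalancerBandwidth(byte[] wire)
    throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
  GetBalancerBandwidthResponseProto response = parseFrom(wire);
  return response.getBandwidth();   // balancer bandwidth value reported by the datanode
}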
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasBandwidth()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeUInt64(1, bandwidth_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(1, bandwidth_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto) obj;
if (hasBandwidth() != other.hasBandwidth()) return false;
if (hasBandwidth()) {
if (getBandwidth()
!= other.getBandwidth()) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasBandwidth()) {
hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getBandwidth());
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
**
* bandwidth - balancer bandwidth value of the datanode.
*
*
* Protobuf type {@code hadoop.hdfs.GetBalancerBandwidthResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.GetBalancerBandwidthResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBalancerBandwidthResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBalancerBandwidthResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
bandwidth_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_GetBalancerBandwidthResponseProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.bandwidth_ = bandwidth_;
to_bitField0_ |= 0x00000001;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.getDefaultInstance()) return this;
if (other.hasBandwidth()) {
setBandwidth(other.getBandwidth());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasBandwidth()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private long bandwidth_ ;
/**
* required uint64 bandwidth = 1;
*/
public boolean hasBandwidth() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required uint64 bandwidth = 1;
*/
public long getBandwidth() {
return bandwidth_;
}
/**
* required uint64 bandwidth = 1;
*/
public Builder setBandwidth(long value) {
bitField0_ |= 0x00000001;
bandwidth_ = value;
onChanged();
return this;
}
/**
* required uint64 bandwidth = 1;
*/
public Builder clearBandwidth() {
bitField0_ = (bitField0_ & ~0x00000001);
bandwidth_ = 0L;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetBalancerBandwidthResponseProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.GetBalancerBandwidthResponseProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetBalancerBandwidthResponseProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<GetBalancerBandwidthResponseProto>() {
@java.lang.Override
public GetBalancerBandwidthResponseProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new GetBalancerBandwidthResponseProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetBalancerBandwidthResponseProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<GetBalancerBandwidthResponseProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
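// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not produced by protoc. It shows one way a
// caller might round-trip a GetBalancerBandwidthResponseProto through the wire
// format; the method name and the 10 MB/s bandwidth value are assumptions added
// for documentation only.
// ---------------------------------------------------------------------------
private static GetBalancerBandwidthResponseProto exampleBandwidthRoundTrip()
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
GetBalancerBandwidthResponseProto resp = GetBalancerBandwidthResponseProto.newBuilder()
.setBandwidth(10L * 1024 * 1024) // required field; build() throws if it is unset
.build();
byte[] bytes = resp.toByteArray(); // serialize to the protobuf wire format
return GetBalancerBandwidthResponseProto.parseFrom(bytes); // parse the same bytes back
}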
public interface SubmitDiskBalancerPlanRequestProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.SubmitDiskBalancerPlanRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
*
* A hash of the plan like SHA-1
*
*
* required string planID = 1;
*/
boolean hasPlanID();
/**
*
* A hash of the plan like SHA-1
*
*
* required string planID = 1;
*/
java.lang.String getPlanID();
/**
*
* A hash of the plan like SHA-1
*
*
* required string planID = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getPlanIDBytes();
/**
*
* Plan file data in Json format
*
*
* required string plan = 2;
*/
boolean hasPlan();
/**
*
* Plan file data in Json format
*
*
* required string plan = 2;
*/
java.lang.String getPlan();
/**
*
* Plan file data in Json format
*
*
* required string plan = 2;
*/
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getPlanBytes();
/**
*
* Plan version number
*
*
* optional uint64 planVersion = 3;
*/
boolean hasPlanVersion();
/**
*
* Plan version number
*
*
* optional uint64 planVersion = 3;
*/
long getPlanVersion();
/**
*
* Ignore date checks on this plan.
*
*
* optional bool ignoreDateCheck = 4;
*/
boolean hasIgnoreDateCheck();
/**
*
* Ignore date checks on this plan.
*
*
* optional bool ignoreDateCheck = 4;
*/
boolean getIgnoreDateCheck();
/**
*
* Plan file path
*
*
* required string planFile = 5;
*/
boolean hasPlanFile();
/**
*
* Plan file path
*
*
* required string planFile = 5;
*/
java.lang.String getPlanFile();
/**
*
* Plan file path
*
*
* required string planFile = 5;
*/
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getPlanFileBytes();
}
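// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not produced by protoc. It shows how a reader
// of the interface above might consume a received plan-submission request: required
// string fields are read directly, while the optional fields (planVersion,
// ignoreDateCheck) are guarded by their has* accessors. The fallback version of 1
// and the summary format are assumptions added for documentation only.
// ---------------------------------------------------------------------------
private static java.lang.String describeSubmitPlanRequest(
SubmitDiskBalancerPlanRequestProtoOrBuilder req) {
long version = req.hasPlanVersion() ? req.getPlanVersion() : 1L; // assumed default version
boolean ignoreDates = req.hasIgnoreDateCheck() && req.getIgnoreDateCheck();
return "plan " + req.getPlanID() // SHA-1 style hash identifying the plan
+ " (v" + version + ") from " + req.getPlanFile() // path the plan was loaded from
+ (ignoreDates ? " [date checks ignored]" : "");
}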
/**
*
**
* This message allows a client to submit a disk
* balancer plan to a data node.
*
*
* Protobuf type {@code hadoop.hdfs.SubmitDiskBalancerPlanRequestProto}
*/
public static final class SubmitDiskBalancerPlanRequestProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.SubmitDiskBalancerPlanRequestProto)
SubmitDiskBalancerPlanRequestProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use SubmitDiskBalancerPlanRequestProto.newBuilder() to construct.
private SubmitDiskBalancerPlanRequestProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private SubmitDiskBalancerPlanRequestProto() {
planID_ = "";
plan_ = "";
planFile_ = "";
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private SubmitDiskBalancerPlanRequestProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000001;
planID_ = bs;
break;
}
case 18: {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000002;
plan_ = bs;
break;
}
case 24: {
bitField0_ |= 0x00000004;
planVersion_ = input.readUInt64();
break;
}
case 32: {
bitField0_ |= 0x00000008;
ignoreDateCheck_ = input.readBool();
break;
}
case 42: {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000010;
planFile_ = bs;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_SubmitDiskBalancerPlanRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_SubmitDiskBalancerPlanRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto.Builder.class);
}
private int bitField0_;
public static final int PLANID_FIELD_NUMBER = 1;
private volatile java.lang.Object planID_;
/**
*
* A hash of the plan like SHA-1
*
*
* required string planID = 1;
*/
public boolean hasPlanID() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
*
* A hash of the plan like SHA-1
*
*
* required string planID = 1;
*/
public java.lang.String getPlanID() {
java.lang.Object ref = planID_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
planID_ = s;
}
return s;
}
}
/**
*
* A hash of the plan like SHA-1
*
*
* required string planID = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getPlanIDBytes() {
java.lang.Object ref = planID_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
planID_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
public static final int PLAN_FIELD_NUMBER = 2;
private volatile java.lang.Object plan_;
/**
*
* Plan file data in Json format
*
*
* required string plan = 2;
*/
public boolean hasPlan() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
*
* Plan file data in Json format
*
*
* required string plan = 2;
*/
public java.lang.String getPlan() {
java.lang.Object ref = plan_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
plan_ = s;
}
return s;
}
}
/**
*
* Plan file data in Json format
*
*
* required string plan = 2;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getPlanBytes() {
java.lang.Object ref = plan_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
plan_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
public static final int PLANVERSION_FIELD_NUMBER = 3;
private long planVersion_;
/**
*
* Plan version number
*
*
* optional uint64 planVersion = 3;
*/
public boolean hasPlanVersion() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
*
* Plan version number
*
*
* optional uint64 planVersion = 3;
*/
public long getPlanVersion() {
return planVersion_;
}
public static final int IGNOREDATECHECK_FIELD_NUMBER = 4;
private boolean ignoreDateCheck_;
/**
*
* Ignore date checks on this plan.
*
*
* optional bool ignoreDateCheck = 4;
*/
public boolean hasIgnoreDateCheck() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
*
* Ignore date checks on this plan.
*
*
* optional bool ignoreDateCheck = 4;
*/
public boolean getIgnoreDateCheck() {
return ignoreDateCheck_;
}
public static final int PLANFILE_FIELD_NUMBER = 5;
private volatile java.lang.Object planFile_;
/**
*
* Plan file path
*
*
* required string planFile = 5;
*/
public boolean hasPlanFile() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
*
* Plan file path
*
*
* required string planFile = 5;
*/
public java.lang.String getPlanFile() {
java.lang.Object ref = planFile_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
planFile_ = s;
}
return s;
}
}
/**
*
* Plan file path
*
*
* required string planFile = 5;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getPlanFileBytes() {
java.lang.Object ref = planFile_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
planFile_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasPlanID()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasPlan()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasPlanFile()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, planID_);
}
if (((bitField0_ & 0x00000002) != 0)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, plan_);
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeUInt64(3, planVersion_);
}
if (((bitField0_ & 0x00000008) != 0)) {
output.writeBool(4, ignoreDateCheck_);
}
if (((bitField0_ & 0x00000010) != 0)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 5, planFile_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, planID_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, plan_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt64Size(3, planVersion_);
}
if (((bitField0_ & 0x00000008) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeBoolSize(4, ignoreDateCheck_);
}
if (((bitField0_ & 0x00000010) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(5, planFile_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto) obj;
if (hasPlanID() != other.hasPlanID()) return false;
if (hasPlanID()) {
if (!getPlanID()
.equals(other.getPlanID())) return false;
}
if (hasPlan() != other.hasPlan()) return false;
if (hasPlan()) {
if (!getPlan()
.equals(other.getPlan())) return false;
}
if (hasPlanVersion() != other.hasPlanVersion()) return false;
if (hasPlanVersion()) {
if (getPlanVersion()
!= other.getPlanVersion()) return false;
}
if (hasIgnoreDateCheck() != other.hasIgnoreDateCheck()) return false;
if (hasIgnoreDateCheck()) {
if (getIgnoreDateCheck()
!= other.getIgnoreDateCheck()) return false;
}
if (hasPlanFile() != other.hasPlanFile()) return false;
if (hasPlanFile()) {
if (!getPlanFile()
.equals(other.getPlanFile())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasPlanID()) {
hash = (37 * hash) + PLANID_FIELD_NUMBER;
hash = (53 * hash) + getPlanID().hashCode();
}
if (hasPlan()) {
hash = (37 * hash) + PLAN_FIELD_NUMBER;
hash = (53 * hash) + getPlan().hashCode();
}
if (hasPlanVersion()) {
hash = (37 * hash) + PLANVERSION_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
getPlanVersion());
}
if (hasIgnoreDateCheck()) {
hash = (37 * hash) + IGNOREDATECHECK_FIELD_NUMBER;
hash = (53 * hash) + org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
getIgnoreDateCheck());
}
if (hasPlanFile()) {
hash = (37 * hash) + PLANFILE_FIELD_NUMBER;
hash = (53 * hash) + getPlanFile().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
**
* This message allows a client to submit a disk
* balancer plan to a data node.
*
*
* Protobuf type {@code hadoop.hdfs.SubmitDiskBalancerPlanRequestProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.SubmitDiskBalancerPlanRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_SubmitDiskBalancerPlanRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_SubmitDiskBalancerPlanRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
planID_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
plan_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
planVersion_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
ignoreDateCheck_ = false;
bitField0_ = (bitField0_ & ~0x00000008);
planFile_ = "";
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_SubmitDiskBalancerPlanRequestProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
to_bitField0_ |= 0x00000001;
}
result.planID_ = planID_;
if (((from_bitField0_ & 0x00000002) != 0)) {
to_bitField0_ |= 0x00000002;
}
result.plan_ = plan_;
if (((from_bitField0_ & 0x00000004) != 0)) {
result.planVersion_ = planVersion_;
to_bitField0_ |= 0x00000004;
}
if (((from_bitField0_ & 0x00000008) != 0)) {
result.ignoreDateCheck_ = ignoreDateCheck_;
to_bitField0_ |= 0x00000008;
}
if (((from_bitField0_ & 0x00000010) != 0)) {
to_bitField0_ |= 0x00000010;
}
result.planFile_ = planFile_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto.getDefaultInstance()) return this;
if (other.hasPlanID()) {
bitField0_ |= 0x00000001;
planID_ = other.planID_;
onChanged();
}
if (other.hasPlan()) {
bitField0_ |= 0x00000002;
plan_ = other.plan_;
onChanged();
}
if (other.hasPlanVersion()) {
setPlanVersion(other.getPlanVersion());
}
if (other.hasIgnoreDateCheck()) {
setIgnoreDateCheck(other.getIgnoreDateCheck());
}
if (other.hasPlanFile()) {
bitField0_ |= 0x00000010;
planFile_ = other.planFile_;
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasPlanID()) {
return false;
}
if (!hasPlan()) {
return false;
}
if (!hasPlanFile()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private java.lang.Object planID_ = "";
/**
*
* A hash of the plan like SHA-1
*
*
* required string planID = 1;
*/
public boolean hasPlanID() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
*
* A hash of the plan like SHA-1
*
*
* required string planID = 1;
*/
public java.lang.String getPlanID() {
java.lang.Object ref = planID_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
planID_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
*
* A hash of the plan like SHA-1
*
*
* required string planID = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getPlanIDBytes() {
java.lang.Object ref = planID_;
if (ref instanceof String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
planID_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
*
* A hash of the plan like SHA-1
*
*
* required string planID = 1;
*/
public Builder setPlanID(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
planID_ = value;
onChanged();
return this;
}
/**
*
* A hash of the plan like SHA-1
*
*
* required string planID = 1;
*/
public Builder clearPlanID() {
bitField0_ = (bitField0_ & ~0x00000001);
planID_ = getDefaultInstance().getPlanID();
onChanged();
return this;
}
/**
*
* A hash of the plan like SHA-1
*
*
* required string planID = 1;
*/
public Builder setPlanIDBytes(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
planID_ = value;
onChanged();
return this;
}
private java.lang.Object plan_ = "";
/**
*
* Plan file data in Json format
*
*
* required string plan = 2;
*/
public boolean hasPlan() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
*
* Plan file data in Json format
*
*
* required string plan = 2;
*/
public java.lang.String getPlan() {
java.lang.Object ref = plan_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
plan_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
*
* Plan file data in Json format
*
*
* required string plan = 2;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getPlanBytes() {
java.lang.Object ref = plan_;
if (ref instanceof String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
plan_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
*
* Plan file data in Json format
*
*
* required string plan = 2;
*/
public Builder setPlan(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
plan_ = value;
onChanged();
return this;
}
/**
*
* Plan file data in Json format
*
*
* required string plan = 2;
*/
public Builder clearPlan() {
bitField0_ = (bitField0_ & ~0x00000002);
plan_ = getDefaultInstance().getPlan();
onChanged();
return this;
}
/**
*
* Plan file data in Json format
*
*
* required string plan = 2;
*/
public Builder setPlanBytes(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
plan_ = value;
onChanged();
return this;
}
private long planVersion_ ;
/**
*
* Plan version number
*
*
* optional uint64 planVersion = 3;
*/
public boolean hasPlanVersion() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
*
* Plan version number
*
*
* optional uint64 planVersion = 3;
*/
public long getPlanVersion() {
return planVersion_;
}
/**
*
* Plan version number
*
*
* optional uint64 planVersion = 3;
*/
public Builder setPlanVersion(long value) {
bitField0_ |= 0x00000004;
planVersion_ = value;
onChanged();
return this;
}
/**
*
* Plan version number
*
*
* optional uint64 planVersion = 3;
*/
public Builder clearPlanVersion() {
bitField0_ = (bitField0_ & ~0x00000004);
planVersion_ = 0L;
onChanged();
return this;
}
private boolean ignoreDateCheck_ ;
/**
*
* Ignore date checks on this plan.
*
*
* optional bool ignoreDateCheck = 4;
*/
public boolean hasIgnoreDateCheck() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
*
* Ignore date checks on this plan.
*
*
* optional bool ignoreDateCheck = 4;
*/
public boolean getIgnoreDateCheck() {
return ignoreDateCheck_;
}
/**
*
* Ignore date checks on this plan.
*
*
* optional bool ignoreDateCheck = 4;
*/
public Builder setIgnoreDateCheck(boolean value) {
bitField0_ |= 0x00000008;
ignoreDateCheck_ = value;
onChanged();
return this;
}
/**
*
* Ignore date checks on this plan.
*
*
* optional bool ignoreDateCheck = 4;
*/
public Builder clearIgnoreDateCheck() {
bitField0_ = (bitField0_ & ~0x00000008);
ignoreDateCheck_ = false;
onChanged();
return this;
}
private java.lang.Object planFile_ = "";
/**
*
* Plan file path
*
*
* required string planFile = 5;
*/
public boolean hasPlanFile() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
*
* Plan file path
*
*
* required string planFile = 5;
*/
public java.lang.String getPlanFile() {
java.lang.Object ref = planFile_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
planFile_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
*
* Plan file path
*
*
* required string planFile = 5;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getPlanFileBytes() {
java.lang.Object ref = planFile_;
if (ref instanceof String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
planFile_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
*
* Plan file path
*
*
* required string planFile = 5;
*/
public Builder setPlanFile(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
planFile_ = value;
onChanged();
return this;
}
/**
*
* Plan file path
*
*
* required string planFile = 5;
*/
public Builder clearPlanFile() {
bitField0_ = (bitField0_ & ~0x00000010);
planFile_ = getDefaultInstance().getPlanFile();
onChanged();
return this;
}
/**
*
* Plan file path
*
*
* required string planFile = 5;
*/
public Builder setPlanFileBytes(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
planFile_ = value;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.SubmitDiskBalancerPlanRequestProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.SubmitDiskBalancerPlanRequestProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<SubmitDiskBalancerPlanRequestProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<SubmitDiskBalancerPlanRequestProto>() {
@java.lang.Override
public SubmitDiskBalancerPlanRequestProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new SubmitDiskBalancerPlanRequestProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<SubmitDiskBalancerPlanRequestProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<SubmitDiskBalancerPlanRequestProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
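// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not produced by protoc. It builds a
// SubmitDiskBalancerPlanRequestProto with the builder generated above; the planID,
// JSON body and file path literals are placeholders, not values taken from HDFS.
// The three required fields (planID, plan, planFile) must all be set, otherwise
// build() throws an uninitialized-message exception.
// ---------------------------------------------------------------------------
private static SubmitDiskBalancerPlanRequestProto exampleSubmitPlanRequest() {
return SubmitDiskBalancerPlanRequestProto.newBuilder()
.setPlanID("placeholder-plan-hash") // hash of the plan, e.g. a SHA-1 hex string
.setPlan("{\"volumeSetPlans\":[]}") // plan body in JSON (placeholder shape)
.setPlanVersion(1L) // optional plan version number
.setIgnoreDateCheck(false) // optional; keep the date checks enabled
.setPlanFile("/tmp/diskbalancer/plan.json") // path the plan was read from (placeholder)
.build();
}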
public interface SubmitDiskBalancerPlanResponseProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.SubmitDiskBalancerPlanResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
}
/**
*
**
* Response from the DataNode on Plan Submit request
*
*
* Protobuf type {@code hadoop.hdfs.SubmitDiskBalancerPlanResponseProto}
*/
public static final class SubmitDiskBalancerPlanResponseProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.SubmitDiskBalancerPlanResponseProto)
SubmitDiskBalancerPlanResponseProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use SubmitDiskBalancerPlanResponseProto.newBuilder() to construct.
private SubmitDiskBalancerPlanResponseProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private SubmitDiskBalancerPlanResponseProto() {
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private SubmitDiskBalancerPlanResponseProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_SubmitDiskBalancerPlanResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_SubmitDiskBalancerPlanResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.Builder.class);
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto) obj;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* <pre>
**
* Response from the DataNode on Plan Submit request
* </pre>
*
* Protobuf type {@code hadoop.hdfs.SubmitDiskBalancerPlanResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.SubmitDiskBalancerPlanResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_SubmitDiskBalancerPlanResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_SubmitDiskBalancerPlanResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_SubmitDiskBalancerPlanResponseProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto(this);
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.SubmitDiskBalancerPlanResponseProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.SubmitDiskBalancerPlanResponseProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<SubmitDiskBalancerPlanResponseProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<SubmitDiskBalancerPlanResponseProto>() {
@java.lang.Override
public SubmitDiskBalancerPlanResponseProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new SubmitDiskBalancerPlanResponseProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<SubmitDiskBalancerPlanResponseProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<SubmitDiskBalancerPlanResponseProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
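// Illustrative sketch only, not part of the protoc output: a minimal example of how a caller
// might obtain the field-less SubmitDiskBalancerPlanResponseProto and round-trip it through
// the protobuf wire format. The helper method name below is hypothetical.
private static SubmitDiskBalancerPlanResponseProto exampleSubmitPlanResponseRoundTrip()
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
// The message declares no fields, so the shared default instance is sufficient.
SubmitDiskBalancerPlanResponseProto response =
SubmitDiskBalancerPlanResponseProto.getDefaultInstance();
byte[] wire = response.toByteArray(); // the default instance serializes to an empty byte array
return SubmitDiskBalancerPlanResponseProto.parseFrom(wire); // throws InvalidProtocolBufferException on malformed input
}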
public interface CancelPlanRequestProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.CancelPlanRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required string planID = 1;
*/
boolean hasPlanID();
/**
* required string planID = 1;
*/
java.lang.String getPlanID();
/**
* required string planID = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getPlanIDBytes();
}
/**
* <pre>
**
* This message describes a request to cancel an
* outstanding disk balancer plan
* </pre>
*
* Protobuf type {@code hadoop.hdfs.CancelPlanRequestProto}
*/
public static final class CancelPlanRequestProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.CancelPlanRequestProto)
CancelPlanRequestProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use CancelPlanRequestProto.newBuilder() to construct.
private CancelPlanRequestProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private CancelPlanRequestProto() {
planID_ = "";
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private CancelPlanRequestProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000001;
planID_ = bs;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_CancelPlanRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_CancelPlanRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto.Builder.class);
}
private int bitField0_;
public static final int PLANID_FIELD_NUMBER = 1;
private volatile java.lang.Object planID_;
/**
* required string planID = 1;
*/
public boolean hasPlanID() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required string planID = 1;
*/
public java.lang.String getPlanID() {
java.lang.Object ref = planID_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
planID_ = s;
}
return s;
}
}
/**
* required string planID = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getPlanIDBytes() {
java.lang.Object ref = planID_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
planID_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasPlanID()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, planID_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, planID_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto) obj;
if (hasPlanID() != other.hasPlanID()) return false;
if (hasPlanID()) {
if (!getPlanID()
.equals(other.getPlanID())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasPlanID()) {
hash = (37 * hash) + PLANID_FIELD_NUMBER;
hash = (53 * hash) + getPlanID().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* <pre>
**
* This message describes a request to cancel an
* outstanding disk balancer plan
* </pre>
*
* Protobuf type {@code hadoop.hdfs.CancelPlanRequestProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.CancelPlanRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_CancelPlanRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_CancelPlanRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
planID_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_CancelPlanRequestProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
to_bitField0_ |= 0x00000001;
}
result.planID_ = planID_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto.getDefaultInstance()) return this;
if (other.hasPlanID()) {
bitField0_ |= 0x00000001;
planID_ = other.planID_;
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasPlanID()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private java.lang.Object planID_ = "";
/**
* required string planID = 1;
*/
public boolean hasPlanID() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required string planID = 1;
*/
public java.lang.String getPlanID() {
java.lang.Object ref = planID_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
planID_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string planID = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getPlanIDBytes() {
java.lang.Object ref = planID_;
if (ref instanceof String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
planID_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* required string planID = 1;
*/
public Builder setPlanID(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
planID_ = value;
onChanged();
return this;
}
/**
* required string planID = 1;
*/
public Builder clearPlanID() {
bitField0_ = (bitField0_ & ~0x00000001);
planID_ = getDefaultInstance().getPlanID();
onChanged();
return this;
}
/**
* required string planID = 1;
*/
public Builder setPlanIDBytes(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
planID_ = value;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.CancelPlanRequestProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.CancelPlanRequestProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<CancelPlanRequestProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<CancelPlanRequestProto>() {
@java.lang.Override
public CancelPlanRequestProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new CancelPlanRequestProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<CancelPlanRequestProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<CancelPlanRequestProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
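// Illustrative sketch only, not emitted by protoc: it shows how a client might populate the
// required planID field of CancelPlanRequestProto through its Builder and round-trip the
// message. The helper method name and the "plan-0001" identifier are hypothetical placeholders.
private static CancelPlanRequestProto exampleCancelPlanRequestRoundTrip()
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
CancelPlanRequestProto request = CancelPlanRequestProto.newBuilder()
.setPlanID("plan-0001") // required field; build() throws if it is left unset
.build();
byte[] wire = request.toByteArray(); // serialize for the RPC layer
return CancelPlanRequestProto.parseFrom(wire); // parsed copy; getPlanID() equals "plan-0001"
}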
public interface CancelPlanResponseProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.CancelPlanResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
}
/**
* <pre>
**
* This is the response for the cancellation request
* </pre>
*
* Protobuf type {@code hadoop.hdfs.CancelPlanResponseProto}
*/
public static final class CancelPlanResponseProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.CancelPlanResponseProto)
CancelPlanResponseProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use CancelPlanResponseProto.newBuilder() to construct.
private CancelPlanResponseProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private CancelPlanResponseProto() {
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private CancelPlanResponseProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_CancelPlanResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_CancelPlanResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.Builder.class);
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto) obj;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* <pre>
**
* This is the response for the cancellation request
* </pre>
*
* Protobuf type {@code hadoop.hdfs.CancelPlanResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.CancelPlanResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_CancelPlanResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_CancelPlanResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_CancelPlanResponseProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto(this);
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.CancelPlanResponseProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.CancelPlanResponseProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<CancelPlanResponseProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<CancelPlanResponseProto>() {
@java.lang.Override
public CancelPlanResponseProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new CancelPlanResponseProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<CancelPlanResponseProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<CancelPlanResponseProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
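// --- Illustrative sketch (editor's addition, not produced by protoc). ---
// CancelPlanResponseProto carries no fields; it is a bare acknowledgement for a
// cancel-plan RPC. The helper below only exercises the generated API: an empty
// message serializes to zero payload bytes and parses back to an instance equal
// to the shared default instance.
private static CancelPlanResponseProto exampleCancelPlanResponseRoundTrip()
    throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
  CancelPlanResponseProto ack = CancelPlanResponseProto.newBuilder().build();
  byte[] wire = ack.toByteArray();                 // empty: no fields, no unknown fields
  assert wire.length == ack.getSerializedSize();   // both are 0 for this message
  return CancelPlanResponseProto.parseFrom(wire);  // equal to getDefaultInstance()
}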
public interface QueryPlanStatusRequestProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.QueryPlanStatusRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
}
/**
* <pre>
**
* This message allows a client to query a datanode to check whether a
* disk balancer plan is executing and, if so, what its status is.
* </pre>
*
* Protobuf type {@code hadoop.hdfs.QueryPlanStatusRequestProto}
*/
public static final class QueryPlanStatusRequestProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.QueryPlanStatusRequestProto)
QueryPlanStatusRequestProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use QueryPlanStatusRequestProto.newBuilder() to construct.
private QueryPlanStatusRequestProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private QueryPlanStatusRequestProto() {
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private QueryPlanStatusRequestProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_QueryPlanStatusRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_QueryPlanStatusRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto.Builder.class);
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto) obj;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* <pre>
**
* This message allows a client to query a datanode to check whether a
* disk balancer plan is executing and, if so, what its status is.
* </pre>
*
* Protobuf type {@code hadoop.hdfs.QueryPlanStatusRequestProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.QueryPlanStatusRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_QueryPlanStatusRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_QueryPlanStatusRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_QueryPlanStatusRequestProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto(this);
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.QueryPlanStatusRequestProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.QueryPlanStatusRequestProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<QueryPlanStatusRequestProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<QueryPlanStatusRequestProto>() {
@java.lang.Override
public QueryPlanStatusRequestProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new QueryPlanStatusRequestProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<QueryPlanStatusRequestProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<QueryPlanStatusRequestProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
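// --- Illustrative sketch (editor's addition, not produced by protoc). ---
// QueryPlanStatusRequestProto also has no fields, so callers typically reuse the
// shared default instance instead of building a fresh message per RPC. The
// delimited write/read pair below just exercises the generated
// parseDelimitedFrom() helper against an in-memory stream.
private static QueryPlanStatusRequestProto exampleQueryPlanStatusRequest()
    throws java.io.IOException {
  QueryPlanStatusRequestProto request = QueryPlanStatusRequestProto.getDefaultInstance();
  java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
  request.writeDelimitedTo(out);        // varint length prefix followed by an empty body
  java.io.ByteArrayInputStream in = new java.io.ByteArrayInputStream(out.toByteArray());
  return QueryPlanStatusRequestProto.parseDelimitedFrom(in);
}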
public interface QueryPlanStatusResponseProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.QueryPlanStatusResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* optional uint32 result = 1;
*/
boolean hasResult();
/**
* optional uint32 result = 1;
*/
int getResult();
/**
* optional string planID = 2;
*/
boolean hasPlanID();
/**
* optional string planID = 2;
*/
java.lang.String getPlanID();
/**
* optional string planID = 2;
*/
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getPlanIDBytes();
/**
* optional string currentStatus = 3;
*/
boolean hasCurrentStatus();
/**
* optional string currentStatus = 3;
*/
java.lang.String getCurrentStatus();
/**
* optional string currentStatus = 3;
*/
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getCurrentStatusBytes();
/**
* optional string planFile = 4;
*/
boolean hasPlanFile();
/**
* optional string planFile = 4;
*/
java.lang.String getPlanFile();
/**
* optional string planFile = 4;
*/
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getPlanFileBytes();
}
/**
* <pre>
**
* This message describes a disk balancer plan, if one is in progress.
* </pre>
*
* Protobuf type {@code hadoop.hdfs.QueryPlanStatusResponseProto}
*/
public static final class QueryPlanStatusResponseProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.QueryPlanStatusResponseProto)
QueryPlanStatusResponseProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use QueryPlanStatusResponseProto.newBuilder() to construct.
private QueryPlanStatusResponseProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private QueryPlanStatusResponseProto() {
planID_ = "";
currentStatus_ = "";
planFile_ = "";
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private QueryPlanStatusResponseProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
bitField0_ |= 0x00000001;
result_ = input.readUInt32();
break;
}
case 18: {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000002;
planID_ = bs;
break;
}
case 26: {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000004;
currentStatus_ = bs;
break;
}
case 34: {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000008;
planFile_ = bs;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_QueryPlanStatusResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_QueryPlanStatusResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.Builder.class);
}
private int bitField0_;
public static final int RESULT_FIELD_NUMBER = 1;
private int result_;
/**
* optional uint32 result = 1;
*/
public boolean hasResult() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* optional uint32 result = 1;
*/
public int getResult() {
return result_;
}
public static final int PLANID_FIELD_NUMBER = 2;
private volatile java.lang.Object planID_;
/**
* optional string planID = 2;
*/
public boolean hasPlanID() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional string planID = 2;
*/
public java.lang.String getPlanID() {
java.lang.Object ref = planID_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
planID_ = s;
}
return s;
}
}
/**
* optional string planID = 2;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getPlanIDBytes() {
java.lang.Object ref = planID_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
planID_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
public static final int CURRENTSTATUS_FIELD_NUMBER = 3;
private volatile java.lang.Object currentStatus_;
/**
* optional string currentStatus = 3;
*/
public boolean hasCurrentStatus() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* optional string currentStatus = 3;
*/
public java.lang.String getCurrentStatus() {
java.lang.Object ref = currentStatus_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
currentStatus_ = s;
}
return s;
}
}
/**
* optional string currentStatus = 3;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getCurrentStatusBytes() {
java.lang.Object ref = currentStatus_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
currentStatus_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
public static final int PLANFILE_FIELD_NUMBER = 4;
private volatile java.lang.Object planFile_;
/**
* optional string planFile = 4;
*/
public boolean hasPlanFile() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* optional string planFile = 4;
*/
public java.lang.String getPlanFile() {
java.lang.Object ref = planFile_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
planFile_ = s;
}
return s;
}
}
/**
* optional string planFile = 4;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getPlanFileBytes() {
java.lang.Object ref = planFile_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
planFile_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeUInt32(1, result_);
}
if (((bitField0_ & 0x00000002) != 0)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, planID_);
}
if (((bitField0_ & 0x00000004) != 0)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, currentStatus_);
}
if (((bitField0_ & 0x00000008) != 0)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 4, planFile_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
.computeUInt32Size(1, result_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, planID_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, currentStatus_);
}
if (((bitField0_ & 0x00000008) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(4, planFile_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto) obj;
if (hasResult() != other.hasResult()) return false;
if (hasResult()) {
if (getResult()
!= other.getResult()) return false;
}
if (hasPlanID() != other.hasPlanID()) return false;
if (hasPlanID()) {
if (!getPlanID()
.equals(other.getPlanID())) return false;
}
if (hasCurrentStatus() != other.hasCurrentStatus()) return false;
if (hasCurrentStatus()) {
if (!getCurrentStatus()
.equals(other.getCurrentStatus())) return false;
}
if (hasPlanFile() != other.hasPlanFile()) return false;
if (hasPlanFile()) {
if (!getPlanFile()
.equals(other.getPlanFile())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasResult()) {
hash = (37 * hash) + RESULT_FIELD_NUMBER;
hash = (53 * hash) + getResult();
}
if (hasPlanID()) {
hash = (37 * hash) + PLANID_FIELD_NUMBER;
hash = (53 * hash) + getPlanID().hashCode();
}
if (hasCurrentStatus()) {
hash = (37 * hash) + CURRENTSTATUS_FIELD_NUMBER;
hash = (53 * hash) + getCurrentStatus().hashCode();
}
if (hasPlanFile()) {
hash = (37 * hash) + PLANFILE_FIELD_NUMBER;
hash = (53 * hash) + getPlanFile().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* <pre>
**
* This message describes a disk balancer plan, if one is in progress.
* </pre>
*
* Protobuf type {@code hadoop.hdfs.QueryPlanStatusResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.QueryPlanStatusResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_QueryPlanStatusResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_QueryPlanStatusResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
result_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
planID_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
currentStatus_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
planFile_ = "";
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_QueryPlanStatusResponseProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.result_ = result_;
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
to_bitField0_ |= 0x00000002;
}
result.planID_ = planID_;
if (((from_bitField0_ & 0x00000004) != 0)) {
to_bitField0_ |= 0x00000004;
}
result.currentStatus_ = currentStatus_;
if (((from_bitField0_ & 0x00000008) != 0)) {
to_bitField0_ |= 0x00000008;
}
result.planFile_ = planFile_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.getDefaultInstance()) return this;
if (other.hasResult()) {
setResult(other.getResult());
}
if (other.hasPlanID()) {
bitField0_ |= 0x00000002;
planID_ = other.planID_;
onChanged();
}
if (other.hasCurrentStatus()) {
bitField0_ |= 0x00000004;
currentStatus_ = other.currentStatus_;
onChanged();
}
if (other.hasPlanFile()) {
bitField0_ |= 0x00000008;
planFile_ = other.planFile_;
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private int result_ ;
/**
* optional uint32 result = 1;
*/
public boolean hasResult() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* optional uint32 result = 1;
*/
public int getResult() {
return result_;
}
/**
* optional uint32 result = 1;
*/
public Builder setResult(int value) {
bitField0_ |= 0x00000001;
result_ = value;
onChanged();
return this;
}
/**
* optional uint32 result = 1;
*/
public Builder clearResult() {
bitField0_ = (bitField0_ & ~0x00000001);
result_ = 0;
onChanged();
return this;
}
private java.lang.Object planID_ = "";
/**
* optional string planID = 2;
*/
public boolean hasPlanID() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional string planID = 2;
*/
public java.lang.String getPlanID() {
java.lang.Object ref = planID_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
planID_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string planID = 2;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getPlanIDBytes() {
java.lang.Object ref = planID_;
if (ref instanceof String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
planID_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* optional string planID = 2;
*/
public Builder setPlanID(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
planID_ = value;
onChanged();
return this;
}
/**
* optional string planID = 2;
*/
public Builder clearPlanID() {
bitField0_ = (bitField0_ & ~0x00000002);
planID_ = getDefaultInstance().getPlanID();
onChanged();
return this;
}
/**
* optional string planID = 2;
*/
public Builder setPlanIDBytes(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
planID_ = value;
onChanged();
return this;
}
private java.lang.Object currentStatus_ = "";
/**
* optional string currentStatus = 3;
*/
public boolean hasCurrentStatus() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* optional string currentStatus = 3;
*/
public java.lang.String getCurrentStatus() {
java.lang.Object ref = currentStatus_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
currentStatus_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string currentStatus = 3;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getCurrentStatusBytes() {
java.lang.Object ref = currentStatus_;
if (ref instanceof String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
currentStatus_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* optional string currentStatus = 3;
*/
public Builder setCurrentStatus(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
currentStatus_ = value;
onChanged();
return this;
}
/**
* optional string currentStatus = 3;
*/
public Builder clearCurrentStatus() {
bitField0_ = (bitField0_ & ~0x00000004);
currentStatus_ = getDefaultInstance().getCurrentStatus();
onChanged();
return this;
}
/**
* optional string currentStatus = 3;
*/
public Builder setCurrentStatusBytes(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
currentStatus_ = value;
onChanged();
return this;
}
private java.lang.Object planFile_ = "";
/**
* optional string planFile = 4;
*/
public boolean hasPlanFile() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* optional string planFile = 4;
*/
public java.lang.String getPlanFile() {
java.lang.Object ref = planFile_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
planFile_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string planFile = 4;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getPlanFileBytes() {
java.lang.Object ref = planFile_;
if (ref instanceof String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
planFile_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* optional string planFile = 4;
*/
public Builder setPlanFile(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
planFile_ = value;
onChanged();
return this;
}
/**
* optional string planFile = 4;
*/
public Builder clearPlanFile() {
bitField0_ = (bitField0_ & ~0x00000008);
planFile_ = getDefaultInstance().getPlanFile();
onChanged();
return this;
}
/**
* optional string planFile = 4;
*/
public Builder setPlanFileBytes(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
planFile_ = value;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.QueryPlanStatusResponseProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.QueryPlanStatusResponseProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<QueryPlanStatusResponseProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<QueryPlanStatusResponseProto>() {
@java.lang.Override
public QueryPlanStatusResponseProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new QueryPlanStatusResponseProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<QueryPlanStatusResponseProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<QueryPlanStatusResponseProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
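// --- Illustrative sketch (editor's addition, not produced by protoc). ---
// QueryPlanStatusResponseProto uses proto2 optional fields, so presence is
// tracked explicitly via hasResult(), hasPlanID(), and so on. The field values
// below are made-up examples; only the builder and parse calls come from the
// generated API.
private static QueryPlanStatusResponseProto exampleQueryPlanStatusRoundTrip()
    throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
  QueryPlanStatusResponseProto status = QueryPlanStatusResponseProto.newBuilder()
      .setResult(0)                                    // hypothetical result code
      .setPlanID("plan-0001")                          // hypothetical plan id
      .setCurrentStatus("PLAN_UNDER_PROGRESS")         // hypothetical status text
      .setPlanFile("/system/diskbalancer/plan.json")   // hypothetical plan file path
      .build();
  QueryPlanStatusResponseProto parsed =
      QueryPlanStatusResponseProto.parseFrom(status.toByteArray());
  assert parsed.hasPlanID() && parsed.getPlanID().equals("plan-0001");
  return parsed;
}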
public interface DiskBalancerSettingRequestProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.DiskBalancerSettingRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required string key = 1;
*/
boolean hasKey();
/**
* required string key = 1;
*/
java.lang.String getKey();
/**
* required string key = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getKeyBytes();
}
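// --- Illustrative sketch (editor's addition, not produced by protoc). ---
// DiskBalancerSettingRequestProto declares `required string key = 1`, so build()
// refuses to produce a message until the key is set. setKey(...) is the setter
// protoc generates for that field on the Builder defined further down this file;
// the key string below is only a placeholder, real keys are defined by the disk
// balancer itself.
private static DiskBalancerSettingRequestProto exampleDiskBalancerSettingRequest() {
  return DiskBalancerSettingRequestProto.newBuilder()
      .setKey("someDiskBalancerSetting")  // placeholder setting key
      .build();  // would throw UninitializedMessageException if setKey were skipped
}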
/**
* <pre>
**
* This message sends a request to a datanode to get a specific setting
* that is used by the disk balancer.
* </pre>
*
* Protobuf type {@code hadoop.hdfs.DiskBalancerSettingRequestProto}
*/
public static final class DiskBalancerSettingRequestProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.DiskBalancerSettingRequestProto)
DiskBalancerSettingRequestProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use DiskBalancerSettingRequestProto.newBuilder() to construct.
private DiskBalancerSettingRequestProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private DiskBalancerSettingRequestProto() {
key_ = "";
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DiskBalancerSettingRequestProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000001;
key_ = bs;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DiskBalancerSettingRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DiskBalancerSettingRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto.Builder.class);
}
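// Presence bits for this message's fields; bit 0x00000001 records whether the required key field was set.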
private int bitField0_;
public static final int KEY_FIELD_NUMBER = 1;
private volatile java.lang.Object key_;
/**
* required string key = 1;
*/
public boolean hasKey() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required string key = 1;
*/
public java.lang.String getKey() {
java.lang.Object ref = key_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
key_ = s;
}
return s;
}
}
/**
* required string key = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getKeyBytes() {
java.lang.Object ref = key_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
key_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
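// Memoized result of isInitialized(): -1 = not computed yet, 0 = a required field is missing, 1 = fully initialized.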
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasKey()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, key_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, key_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto) obj;
if (hasKey() != other.hasKey()) return false;
if (hasKey()) {
if (!getKey()
.equals(other.getKey())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasKey()) {
hash = (37 * hash) + KEY_FIELD_NUMBER;
hash = (53 * hash) + getKey().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
**
* This message sends a request to the data node to get a specific setting
* that is used by the disk balancer.
*
*
* Protobuf type {@code hadoop.hdfs.DiskBalancerSettingRequestProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.DiskBalancerSettingRequestProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DiskBalancerSettingRequestProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DiskBalancerSettingRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
key_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DiskBalancerSettingRequestProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
to_bitField0_ |= 0x00000001;
}
result.key_ = key_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto.getDefaultInstance()) return this;
if (other.hasKey()) {
bitField0_ |= 0x00000001;
key_ = other.key_;
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasKey()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private java.lang.Object key_ = "";
/**
* required string key = 1;
*/
public boolean hasKey() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required string key = 1;
*/
public java.lang.String getKey() {
java.lang.Object ref = key_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
key_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string key = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getKeyBytes() {
java.lang.Object ref = key_;
if (ref instanceof String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
key_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* required string key = 1;
*/
public Builder setKey(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
key_ = value;
onChanged();
return this;
}
/**
* required string key = 1;
*/
public Builder clearKey() {
bitField0_ = (bitField0_ & ~0x00000001);
key_ = getDefaultInstance().getKey();
onChanged();
return this;
}
/**
* required string key = 1;
*/
public Builder setKeyBytes(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
key_ = value;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DiskBalancerSettingRequestProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DiskBalancerSettingRequestProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<DiskBalancerSettingRequestProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<DiskBalancerSettingRequestProto>() {
@java.lang.Override
public DiskBalancerSettingRequestProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new DiskBalancerSettingRequestProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<DiskBalancerSettingRequestProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<DiskBalancerSettingRequestProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
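// Illustrative usage sketch, not produced by protoc: one plausible way to build a
// DiskBalancerSettingRequestProto and round-trip it through the protobuf wire format.
// The setting key below is a made-up placeholder, not a documented disk balancer key.
private static DiskBalancerSettingRequestProto exampleDiskBalancerSettingRequestRoundTrip()
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
DiskBalancerSettingRequestProto request = DiskBalancerSettingRequestProto.newBuilder()
.setKey("example.disk.balancer.setting")  // required field; build() throws if it is left unset
.build();
byte[] wireBytes = request.toByteArray();  // serialize to the compact wire format
return DiskBalancerSettingRequestProto.parseFrom(wireBytes);  // parse the bytes back into a message
}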
public interface DiskBalancerSettingResponseProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:hadoop.hdfs.DiskBalancerSettingResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
/**
* required string value = 1;
*/
boolean hasValue();
/**
* required string value = 1;
*/
java.lang.String getValue();
/**
* required string value = 1;
*/
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getValueBytes();
}
/**
*
**
* Response that describes the value of the requested disk balancer setting.
*
*
* Protobuf type {@code hadoop.hdfs.DiskBalancerSettingResponseProto}
*/
public static final class DiskBalancerSettingResponseProto extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:hadoop.hdfs.DiskBalancerSettingResponseProto)
DiskBalancerSettingResponseProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use DiskBalancerSettingResponseProto.newBuilder() to construct.
private DiskBalancerSettingResponseProto(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private DiskBalancerSettingResponseProto() {
value_ = "";
}
@java.lang.Override
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DiskBalancerSettingResponseProto(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.Builder unknownFields =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000001;
value_ = bs;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DiskBalancerSettingResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DiskBalancerSettingResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.Builder.class);
}
private int bitField0_;
public static final int VALUE_FIELD_NUMBER = 1;
private volatile java.lang.Object value_;
/**
* required string value = 1;
*/
public boolean hasValue() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required string value = 1;
*/
public java.lang.String getValue() {
java.lang.Object ref = value_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
value_ = s;
}
return s;
}
}
/**
* required string value = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getValueBytes() {
java.lang.Object ref = value_;
if (ref instanceof java.lang.String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
value_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
if (!hasValue()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, value_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, value_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto other = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto) obj;
if (hasValue() != other.hasValue()) return false;
if (hasValue()) {
if (!getValue()
.equals(other.getValue())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasValue()) {
hash = (37 * hash) + VALUE_FIELD_NUMBER;
hash = (53 * hash) + getValue().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parseFrom(
java.nio.ByteBuffer data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parseFrom(
java.nio.ByteBuffer data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parseFrom(byte[] data)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parseFrom(
byte[] data,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parseFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parseDelimitedFrom(
java.io.InputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parseFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
**
* Response that describes the value of the requested disk balancer setting.
*
*
* Protobuf type {@code hadoop.hdfs.DiskBalancerSettingResponseProto}
*/
public static final class Builder extends
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:hadoop.hdfs.DiskBalancerSettingResponseProto)
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProtoOrBuilder {
public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DiskBalancerSettingResponseProto_descriptor;
}
@java.lang.Override
protected org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DiskBalancerSettingResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.class, org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
value_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_hadoop_hdfs_DiskBalancerSettingResponseProto_descriptor;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.getDefaultInstance();
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto build() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto buildPartial() {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto result = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
to_bitField0_ |= 0x00000001;
}
result.value_ = value_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message other) {
if (other instanceof org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto) {
return mergeFrom((org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto other) {
if (other == org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.getDefaultInstance()) return this;
if (other.hasValue()) {
bitField0_ |= 0x00000001;
value_ = other.value_;
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
if (!hasValue()) {
return false;
}
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private java.lang.Object value_ = "";
/**
* required string value = 1;
*/
public boolean hasValue() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* required string value = 1;
*/
public java.lang.String getValue() {
java.lang.Object ref = value_;
if (!(ref instanceof java.lang.String)) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString bs =
(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
value_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string value = 1;
*/
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString
getValueBytes() {
java.lang.Object ref = value_;
if (ref instanceof String) {
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString b =
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
value_ = b;
return b;
} else {
return (org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
}
}
/**
* required string value = 1;
*/
public Builder setValue(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
value_ = value;
onChanged();
return this;
}
/**
* required string value = 1;
*/
public Builder clearValue() {
bitField0_ = (bitField0_ & ~0x00000001);
value_ = getDefaultInstance().getValue();
onChanged();
return this;
}
/**
* required string value = 1;
*/
public Builder setValueBytes(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
value_ = value;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:hadoop.hdfs.DiskBalancerSettingResponseProto)
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.DiskBalancerSettingResponseProto)
private static final org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto();
}
public static org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<DiskBalancerSettingResponseProto>
PARSER = new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.AbstractParser<DiskBalancerSettingResponseProto>() {
@java.lang.Override
public DiskBalancerSettingResponseProto parsePartialFrom(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
return new DiskBalancerSettingResponseProto(input, extensionRegistry);
}
};
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<DiskBalancerSettingResponseProto> parser() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Parser<DiskBalancerSettingResponseProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
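// Illustrative usage sketch, not produced by protoc: shows how a datanode-side caller might
// wrap a setting value in a DiskBalancerSettingResponseProto and how the receiver can read it
// back, checking the required field first. The value string is a placeholder.
private static java.lang.String exampleReadDiskBalancerSettingValue() {
DiskBalancerSettingResponseProto response = DiskBalancerSettingResponseProto.newBuilder()
.setValue("42")  // hypothetical setting value
.build();
// hasValue() reports the presence bit kept in bitField0_ for the required value field.
return response.hasValue() ? response.getValue() : "";
}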
/**
*
**
* Protocol used from client to the Datanode.
* See the request and response for details of rpc call.
*
*
* Protobuf service {@code hadoop.hdfs.ClientDatanodeProtocolService}
*/
public static abstract class ClientDatanodeProtocolService
implements org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Service {
protected ClientDatanodeProtocolService() {}
public interface Interface {
/**
*
**
* Returns the visible length of the replica
*
*
* rpc getReplicaVisibleLength(.hadoop.hdfs.GetReplicaVisibleLengthRequestProto) returns (.hadoop.hdfs.GetReplicaVisibleLengthResponseProto);
*/
public abstract void getReplicaVisibleLength(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto> done);
/**
*
**
* Refresh the list of federated namenodes from updated configuration.
* Adds new namenodes and stops the deleted namenodes.
*
*
* rpc refreshNamenodes(.hadoop.hdfs.RefreshNamenodesRequestProto) returns (.hadoop.hdfs.RefreshNamenodesResponseProto);
*/
public abstract void refreshNamenodes(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto> done);
/**
*
**
* Delete the block pool from the datanode.
*
*
* rpc deleteBlockPool(.hadoop.hdfs.DeleteBlockPoolRequestProto) returns (.hadoop.hdfs.DeleteBlockPoolResponseProto);
*/
public abstract void deleteBlockPool(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto> done);
/**
*
**
* Retrieves the path names of the block file and metadata file stored on the
* local file system.
*
*
* rpc getBlockLocalPathInfo(.hadoop.hdfs.GetBlockLocalPathInfoRequestProto) returns (.hadoop.hdfs.GetBlockLocalPathInfoResponseProto);
*/
public abstract void getBlockLocalPathInfo(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto> done);
/**
* rpc shutdownDatanode(.hadoop.hdfs.ShutdownDatanodeRequestProto) returns (.hadoop.hdfs.ShutdownDatanodeResponseProto);
*/
public abstract void shutdownDatanode(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto> done);
/**
* rpc evictWriters(.hadoop.hdfs.EvictWritersRequestProto) returns (.hadoop.hdfs.EvictWritersResponseProto);
*/
public abstract void evictWriters(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto> done);
/**
* rpc getDatanodeInfo(.hadoop.hdfs.GetDatanodeInfoRequestProto) returns (.hadoop.hdfs.GetDatanodeInfoResponseProto);
*/
public abstract void getDatanodeInfo(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto> done);
/**
* rpc getVolumeReport(.hadoop.hdfs.GetVolumeReportRequestProto) returns (.hadoop.hdfs.GetVolumeReportResponseProto);
*/
public abstract void getVolumeReport(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto> done);
/**
* rpc getReconfigurationStatus(.hadoop.hdfs.GetReconfigurationStatusRequestProto) returns (.hadoop.hdfs.GetReconfigurationStatusResponseProto);
*/
public abstract void getReconfigurationStatus(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto> done);
/**
* rpc startReconfiguration(.hadoop.hdfs.StartReconfigurationRequestProto) returns (.hadoop.hdfs.StartReconfigurationResponseProto);
*/
public abstract void startReconfiguration(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto> done);
/**
* rpc listReconfigurableProperties(.hadoop.hdfs.ListReconfigurablePropertiesRequestProto) returns (.hadoop.hdfs.ListReconfigurablePropertiesResponseProto);
*/
public abstract void listReconfigurableProperties(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto> done);
/**
* rpc triggerBlockReport(.hadoop.hdfs.TriggerBlockReportRequestProto) returns (.hadoop.hdfs.TriggerBlockReportResponseProto);
*/
public abstract void triggerBlockReport(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto> done);
/**
*
**
* Returns the balancer bandwidth value of the datanode.
*
*
* rpc getBalancerBandwidth(.hadoop.hdfs.GetBalancerBandwidthRequestProto) returns (.hadoop.hdfs.GetBalancerBandwidthResponseProto);
*/
public abstract void getBalancerBandwidth(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto> done);
/**
*
**
* Submit a disk balancer plan for execution
*
*
* rpc submitDiskBalancerPlan(.hadoop.hdfs.SubmitDiskBalancerPlanRequestProto) returns (.hadoop.hdfs.SubmitDiskBalancerPlanResponseProto);
*/
public abstract void submitDiskBalancerPlan(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto> done);
/**
*
**
* Cancel an executing plan
*
*
* rpc cancelDiskBalancerPlan(.hadoop.hdfs.CancelPlanRequestProto) returns (.hadoop.hdfs.CancelPlanResponseProto);
*/
public abstract void cancelDiskBalancerPlan(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto> done);
/**
*
**
* Gets the status of an executing Plan
*
*
* rpc queryDiskBalancerPlan(.hadoop.hdfs.QueryPlanStatusRequestProto) returns (.hadoop.hdfs.QueryPlanStatusResponseProto);
*/
public abstract void queryDiskBalancerPlan(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto> done);
/**
*
**
* Gets run-time settings of Disk Balancer.
*
*
* rpc getDiskBalancerSetting(.hadoop.hdfs.DiskBalancerSettingRequestProto) returns (.hadoop.hdfs.DiskBalancerSettingResponseProto);
*/
public abstract void getDiskBalancerSetting(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto> done);
}
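// Illustrative sketch, not part of the generated service: demonstrates the asynchronous
// callback pattern shared by every rpc in Interface, using getDiskBalancerSetting as the
// example. A real Interface implementation would look the key up in the datanode's disk
// balancer; this sketch simply echoes a placeholder value.
private static void exampleAnswerDiskBalancerSetting(
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto> done) {
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto response =
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.newBuilder()
.setValue("placeholder-value-for-" + request.getKey())  // a real datanode would return the actual setting
.build();
done.run(response);  // completes the rpc by handing the response to the framework-supplied callback
}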
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Service newReflectiveService(
final Interface impl) {
return new ClientDatanodeProtocolService() {
@java.lang.Override
public void getReplicaVisibleLength(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto> done) {
impl.getReplicaVisibleLength(controller, request, done);
}
@java.lang.Override
public void refreshNamenodes(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto> done) {
impl.refreshNamenodes(controller, request, done);
}
@java.lang.Override
public void deleteBlockPool(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto> done) {
impl.deleteBlockPool(controller, request, done);
}
@java.lang.Override
public void getBlockLocalPathInfo(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto> done) {
impl.getBlockLocalPathInfo(controller, request, done);
}
@java.lang.Override
public void shutdownDatanode(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto> done) {
impl.shutdownDatanode(controller, request, done);
}
@java.lang.Override
public void evictWriters(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto> done) {
impl.evictWriters(controller, request, done);
}
@java.lang.Override
public void getDatanodeInfo(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto> done) {
impl.getDatanodeInfo(controller, request, done);
}
@java.lang.Override
public void getVolumeReport(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto> done) {
impl.getVolumeReport(controller, request, done);
}
@java.lang.Override
public void getReconfigurationStatus(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto> done) {
impl.getReconfigurationStatus(controller, request, done);
}
@java.lang.Override
public void startReconfiguration(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto> done) {
impl.startReconfiguration(controller, request, done);
}
@java.lang.Override
public void listReconfigurableProperties(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto> done) {
impl.listReconfigurableProperties(controller, request, done);
}
@java.lang.Override
public void triggerBlockReport(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto> done) {
impl.triggerBlockReport(controller, request, done);
}
@java.lang.Override
public void getBalancerBandwidth(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto> done) {
impl.getBalancerBandwidth(controller, request, done);
}
@java.lang.Override
public void submitDiskBalancerPlan(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto> done) {
impl.submitDiskBalancerPlan(controller, request, done);
}
@java.lang.Override
public void cancelDiskBalancerPlan(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto> done) {
impl.cancelDiskBalancerPlan(controller, request, done);
}
@java.lang.Override
public void queryDiskBalancerPlan(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto> done) {
impl.queryDiskBalancerPlan(controller, request, done);
}
@java.lang.Override
public void getDiskBalancerSetting(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto> done) {
impl.getDiskBalancerSetting(controller, request, done);
}
};
}
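/*
 * newReflectiveService() adapts an Interface implementation into a generic protobuf Service: each
 * override above forwards straight to the matching Interface method, and the descriptor-driven
 * callMethod() defined later in this class selects the override by method index. A hedged sketch
 * of wiring it up, where impl is an Interface implementation as sketched above and the server
 * registration call is a hypothetical placeholder, not something defined in this file:
 *
 *   org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Service service =
 *       ClientDatanodeProtocolService.newReflectiveService(impl);
 *   // someRpcServer.registerService(service);  // hypothetical registration with an RPC server
 */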
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.BlockingService
newReflectiveBlockingService(final BlockingInterface impl) {
return new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.BlockingService() {
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.ServiceDescriptor
getDescriptorForType() {
return getDescriptor();
}
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message callBlockingMethod(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.callBlockingMethod() given method descriptor for " +
"wrong service type.");
}
switch(method.getIndex()) {
case 0:
return impl.getReplicaVisibleLength(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto)request);
case 1:
return impl.refreshNamenodes(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto)request);
case 2:
return impl.deleteBlockPool(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto)request);
case 3:
return impl.getBlockLocalPathInfo(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto)request);
case 4:
return impl.shutdownDatanode(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto)request);
case 5:
return impl.evictWriters(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto)request);
case 6:
return impl.getDatanodeInfo(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto)request);
case 7:
return impl.getVolumeReport(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto)request);
case 8:
return impl.getReconfigurationStatus(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto)request);
case 9:
return impl.startReconfiguration(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto)request);
case 10:
return impl.listReconfigurableProperties(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto)request);
case 11:
return impl.triggerBlockReport(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto)request);
case 12:
return impl.getBalancerBandwidth(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto)request);
case 13:
return impl.submitDiskBalancerPlan(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto)request);
case 14:
return impl.cancelDiskBalancerPlan(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto)request);
case 15:
return impl.queryDiskBalancerPlan(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto)request);
case 16:
return impl.getDiskBalancerSetting(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message
getRequestPrototype(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getRequestPrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.getDefaultInstance();
case 2:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.getDefaultInstance();
case 3:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.getDefaultInstance();
case 4:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.getDefaultInstance();
case 5:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto.getDefaultInstance();
case 6:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.getDefaultInstance();
case 7:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto.getDefaultInstance();
case 8:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto.getDefaultInstance();
case 9:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto.getDefaultInstance();
case 10:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto.getDefaultInstance();
case 11:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.getDefaultInstance();
case 12:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto.getDefaultInstance();
case 13:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto.getDefaultInstance();
case 14:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto.getDefaultInstance();
case 15:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto.getDefaultInstance();
case 16:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message
getResponsePrototype(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getResponsePrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance();
case 2:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance();
case 3:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance();
case 4:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.getDefaultInstance();
case 5:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.getDefaultInstance();
case 6:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.getDefaultInstance();
case 7:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.getDefaultInstance();
case 8:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto.getDefaultInstance();
case 9:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto.getDefaultInstance();
case 10:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto.getDefaultInstance();
case 11:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.getDefaultInstance();
case 12:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.getDefaultInstance();
case 13:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.getDefaultInstance();
case 14:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.getDefaultInstance();
case 15:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.getDefaultInstance();
case 16:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
};
}
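/*
 * newReflectiveBlockingService() is the synchronous counterpart: callBlockingMethod() dispatches on
 * the method descriptor's index, casts the request Message to the matching request proto and returns
 * the BlockingInterface result directly, while getRequestPrototype()/getResponsePrototype() hand the
 * RPC layer empty messages to use as parser templates. A sketch of invoking it generically, assuming
 * blockingImpl (a BlockingInterface) and controller already exist:
 *
 *   org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.BlockingService blocking =
 *       ClientDatanodeProtocolService.newReflectiveBlockingService(blockingImpl);
 *   org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor md =
 *       blocking.getDescriptorForType().findMethodByName("getDatanodeInfo");
 *   org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message reply =
 *       blocking.callBlockingMethod(md, controller, blocking.getRequestPrototype(md));
 */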
/**
* Returns the visible length of the replica
*
* rpc getReplicaVisibleLength(.hadoop.hdfs.GetReplicaVisibleLengthRequestProto) returns (.hadoop.hdfs.GetReplicaVisibleLengthResponseProto);
*/
public abstract void getReplicaVisibleLength(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto> done);
/**
* Refresh the list of federated namenodes from updated configuration.
* Adds new namenodes and stops the deleted namenodes.
*
* rpc refreshNamenodes(.hadoop.hdfs.RefreshNamenodesRequestProto) returns (.hadoop.hdfs.RefreshNamenodesResponseProto);
*/
public abstract void refreshNamenodes(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto> done);
/**
* Delete the block pool from the datanode.
*
* rpc deleteBlockPool(.hadoop.hdfs.DeleteBlockPoolRequestProto) returns (.hadoop.hdfs.DeleteBlockPoolResponseProto);
*/
public abstract void deleteBlockPool(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto> done);
/**
* Retrieves the path names of the block file and metadata file stored on the
* local file system.
*
* rpc getBlockLocalPathInfo(.hadoop.hdfs.GetBlockLocalPathInfoRequestProto) returns (.hadoop.hdfs.GetBlockLocalPathInfoResponseProto);
*/
public abstract void getBlockLocalPathInfo(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto> done);
/**
* rpc shutdownDatanode(.hadoop.hdfs.ShutdownDatanodeRequestProto) returns (.hadoop.hdfs.ShutdownDatanodeResponseProto);
*/
public abstract void shutdownDatanode(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto> done);
/**
* rpc evictWriters(.hadoop.hdfs.EvictWritersRequestProto) returns (.hadoop.hdfs.EvictWritersResponseProto);
*/
public abstract void evictWriters(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto> done);
/**
* rpc getDatanodeInfo(.hadoop.hdfs.GetDatanodeInfoRequestProto) returns (.hadoop.hdfs.GetDatanodeInfoResponseProto);
*/
public abstract void getDatanodeInfo(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto> done);
/**
* rpc getVolumeReport(.hadoop.hdfs.GetVolumeReportRequestProto) returns (.hadoop.hdfs.GetVolumeReportResponseProto);
*/
public abstract void getVolumeReport(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto> done);
/**
* rpc getReconfigurationStatus(.hadoop.hdfs.GetReconfigurationStatusRequestProto) returns (.hadoop.hdfs.GetReconfigurationStatusResponseProto);
*/
public abstract void getReconfigurationStatus(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto> done);
/**
* rpc startReconfiguration(.hadoop.hdfs.StartReconfigurationRequestProto) returns (.hadoop.hdfs.StartReconfigurationResponseProto);
*/
public abstract void startReconfiguration(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto> done);
/**
* rpc listReconfigurableProperties(.hadoop.hdfs.ListReconfigurablePropertiesRequestProto) returns (.hadoop.hdfs.ListReconfigurablePropertiesResponseProto);
*/
public abstract void listReconfigurableProperties(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto> done);
/**
* rpc triggerBlockReport(.hadoop.hdfs.TriggerBlockReportRequestProto) returns (.hadoop.hdfs.TriggerBlockReportResponseProto);
*/
public abstract void triggerBlockReport(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto> done);
/**
* Returns the balancer bandwidth value of the datanode.
*
* rpc getBalancerBandwidth(.hadoop.hdfs.GetBalancerBandwidthRequestProto) returns (.hadoop.hdfs.GetBalancerBandwidthResponseProto);
*/
public abstract void getBalancerBandwidth(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto> done);
/**
* Submit a disk balancer plan for execution
*
* rpc submitDiskBalancerPlan(.hadoop.hdfs.SubmitDiskBalancerPlanRequestProto) returns (.hadoop.hdfs.SubmitDiskBalancerPlanResponseProto);
*/
public abstract void submitDiskBalancerPlan(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto> done);
/**
* Cancel an executing plan
*
* rpc cancelDiskBalancerPlan(.hadoop.hdfs.CancelPlanRequestProto) returns (.hadoop.hdfs.CancelPlanResponseProto);
*/
public abstract void cancelDiskBalancerPlan(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto> done);
/**
* Gets the status of an executing Plan
*
* rpc queryDiskBalancerPlan(.hadoop.hdfs.QueryPlanStatusRequestProto) returns (.hadoop.hdfs.QueryPlanStatusResponseProto);
*/
public abstract void queryDiskBalancerPlan(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto> done);
/**
* Gets run-time settings of Disk Balancer.
*
* rpc getDiskBalancerSetting(.hadoop.hdfs.DiskBalancerSettingRequestProto) returns (.hadoop.hdfs.DiskBalancerSettingResponseProto);
*/
public abstract void getDiskBalancerSetting(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto> done);
public static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.getDescriptor().getServices().get(0);
}
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.ServiceDescriptor
getDescriptorForType() {
return getDescriptor();
}
public final void callMethod(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message> done) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.callMethod() given method descriptor for wrong " +
"service type.");
}
switch(method.getIndex()) {
case 0:
this.getReplicaVisibleLength(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto)request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback(
done));
return;
case 1:
this.refreshNamenodes(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto)request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback(
done));
return;
case 2:
this.deleteBlockPool(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto)request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback(
done));
return;
case 3:
this.getBlockLocalPathInfo(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto)request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback(
done));
return;
case 4:
this.shutdownDatanode(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto)request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback(
done));
return;
case 5:
this.evictWriters(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto)request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback(
done));
return;
case 6:
this.getDatanodeInfo(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto)request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback(
done));
return;
case 7:
this.getVolumeReport(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto)request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback(
done));
return;
case 8:
this.getReconfigurationStatus(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto)request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback(
done));
return;
case 9:
this.startReconfiguration(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto)request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback(
done));
return;
case 10:
this.listReconfigurableProperties(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto)request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback(
done));
return;
case 11:
this.triggerBlockReport(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto)request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback(
done));
return;
case 12:
this.getBalancerBandwidth(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto)request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback(
done));
return;
case 13:
this.submitDiskBalancerPlan(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto)request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback(
done));
return;
case 14:
this.cancelDiskBalancerPlan(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto)request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback(
done));
return;
case 15:
this.queryDiskBalancerPlan(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto)request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback(
done));
return;
case 16:
this.getDiskBalancerSetting(controller, (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto)request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.specializeCallback(
done));
return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
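/*
 * callMethod() is the generic entry point an RPC server uses for the asynchronous service: the
 * descriptor index picks the concrete RPC, the request Message is cast to that RPC's request proto,
 * and RpcUtil.specializeCallback() narrows the Message callback to the response type the abstract
 * method expects. Driving the service generically might look like this sketch (Java 8+ lambda as
 * the callback; service and controller are assumed to exist):
 *
 *   org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor md =
 *       ClientDatanodeProtocolService.getDescriptor().findMethodByName("getDatanodeInfo");
 *   service.callMethod(md, controller, service.getRequestPrototype(md),
 *       response -> System.out.println("response: " + response));
 */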
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message
getRequestPrototype(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getRequestPrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.getDefaultInstance();
case 2:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.getDefaultInstance();
case 3:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.getDefaultInstance();
case 4:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto.getDefaultInstance();
case 5:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto.getDefaultInstance();
case 6:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto.getDefaultInstance();
case 7:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto.getDefaultInstance();
case 8:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto.getDefaultInstance();
case 9:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto.getDefaultInstance();
case 10:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto.getDefaultInstance();
case 11:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto.getDefaultInstance();
case 12:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto.getDefaultInstance();
case 13:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto.getDefaultInstance();
case 14:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto.getDefaultInstance();
case 15:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto.getDefaultInstance();
case 16:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Message
getResponsePrototype(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getResponsePrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance();
case 2:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance();
case 3:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance();
case 4:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.getDefaultInstance();
case 5:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.getDefaultInstance();
case 6:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.getDefaultInstance();
case 7:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.getDefaultInstance();
case 8:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto.getDefaultInstance();
case 9:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto.getDefaultInstance();
case 10:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto.getDefaultInstance();
case 11:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.getDefaultInstance();
case 12:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.getDefaultInstance();
case 13:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.getDefaultInstance();
case 14:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.getDefaultInstance();
case 15:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.getDefaultInstance();
case 16:
return org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public static Stub newStub(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcChannel channel) {
return new Stub(channel);
}
public static final class Stub extends org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService implements Interface {
private Stub(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcChannel channel) {
this.channel = channel;
}
private final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcChannel channel;
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcChannel getChannel() {
return channel;
}
public void getReplicaVisibleLength(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(0),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance()));
}
public void refreshNamenodes(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(1),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance()));
}
public void deleteBlockPool(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(2),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance()));
}
public void getBlockLocalPathInfo(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(3),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance()));
}
public void shutdownDatanode(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(4),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.getDefaultInstance()));
}
public void evictWriters(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(5),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.getDefaultInstance()));
}
public void getDatanodeInfo(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(6),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.getDefaultInstance()));
}
public void getVolumeReport(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(7),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.getDefaultInstance()));
}
public void getReconfigurationStatus(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(8),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto.getDefaultInstance()));
}
public void startReconfiguration(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(9),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto.getDefaultInstance()));
}
public void listReconfigurableProperties(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(10),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto.getDefaultInstance()));
}
public void triggerBlockReport(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(11),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.getDefaultInstance()));
}
public void getBalancerBandwidth(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(12),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.getDefaultInstance()));
}
public void submitDiskBalancerPlan(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(13),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.getDefaultInstance()));
}
public void cancelDiskBalancerPlan(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(14),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.getDefaultInstance()));
}
public void queryDiskBalancerPlan(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(15),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.getDefaultInstance()));
}
public void getDiskBalancerSetting(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto request,
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(16),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.getDefaultInstance(),
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.class,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.getDefaultInstance()));
}
}
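/*
 * Stub is the client-side view of the asynchronous service: each method above sends the request over
 * the supplied RpcChannel (getDescriptor().getMethods().get(n) names the RPC) and generalizeCallback()
 * re-types the channel's Message callback to the caller's response type. In Hadoop the channel is
 * normally supplied by the RPC engine rather than built by hand; assuming one is available, usage
 * reduces to a sketch like:
 *
 *   ClientDatanodeProtocolService.Stub stub = ClientDatanodeProtocolService.newStub(channel);
 *   stub.getDatanodeInfo(controller,
 *       GetDatanodeInfoRequestProto.getDefaultInstance(),
 *       info -> System.out.println("datanode info: " + info));
 */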
public static BlockingInterface newBlockingStub(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.BlockingRpcChannel channel) {
return new BlockingStub(channel);
}
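/*
 * newBlockingStub() gives the synchronous client: BlockingInterface declares one method per RPC that
 * returns the response proto directly and reports failures as ServiceException. In Hadoop these
 * blocking stubs are typically obtained through the RPC proxy machinery (e.g. ProtobufRpcEngine)
 * rather than constructed by hand; assuming a BlockingRpcChannel and controller are available, a
 * call reduces to:
 *
 *   BlockingInterface client = ClientDatanodeProtocolService.newBlockingStub(blockingChannel);
 *   GetDatanodeInfoResponseProto info =
 *       client.getDatanodeInfo(controller, GetDatanodeInfoRequestProto.getDefaultInstance());
 */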
public interface BlockingInterface {
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto getReplicaVisibleLength(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto refreshNamenodes(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto deleteBlockPool(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto getBlockLocalPathInfo(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto shutdownDatanode(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto evictWriters(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto getDatanodeInfo(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto getVolumeReport(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto getReconfigurationStatus(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto startReconfiguration(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto listReconfigurableProperties(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto triggerBlockReport(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto getBalancerBandwidth(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto submitDiskBalancerPlan(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto cancelDiskBalancerPlan(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto queryDiskBalancerPlan(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto getDiskBalancerSetting(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException;
}
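// BlockingStub is the concrete BlockingInterface implementation returned by newBlockingStub().
// It stores the supplied BlockingRpcChannel and forwards every call to
// channel.callBlockingMethod(), passing the service's MethodDescriptor (looked up by index
// 0-16 in declaration order), the controller, the request, and the response prototype used
// to decode the reply.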
private static final class BlockingStub implements BlockingInterface {
private BlockingStub(org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.BlockingRpcChannel channel) {
this.channel = channel;
}
private final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.BlockingRpcChannel channel;
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto getReplicaVisibleLength(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(0),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto refreshNamenodes(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(1),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto deleteBlockPool(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(2),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto getBlockLocalPathInfo(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(3),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto shutdownDatanode(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(4),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto evictWriters(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(5),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto getDatanodeInfo(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(6),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto getVolumeReport(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(7),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto getReconfigurationStatus(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(8),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto startReconfiguration(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(9),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto listReconfigurableProperties(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(10),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto triggerBlockReport(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(11),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto getBalancerBandwidth(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(12),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto submitDiskBalancerPlan(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(13),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto cancelDiskBalancerPlan(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(14),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto queryDiskBalancerPlan(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(15),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto.getDefaultInstance());
}
public org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto getDiskBalancerSetting(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.RpcController controller,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto request)
throws org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ServiceException {
return (org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(16),
controller,
request,
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto.getDefaultInstance());
}
}
// @@protoc_insertion_point(class_scope:hadoop.hdfs.ClientDatanodeProtocolService)
}
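// The fields below cache, for each message type declared in ClientDatanodeProtocol.proto,
// its Descriptors.Descriptor and the GeneratedMessageV3.FieldAccessorTable used for
// reflective field access. They are populated once by the static initializer at the end of
// this class.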
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_EvictWritersRequestProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_EvictWritersRequestProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_EvictWritersResponseProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_EvictWritersResponseProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetVolumeReportRequestProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_GetVolumeReportRequestProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetVolumeReportResponseProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_GetVolumeReportResponseProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetBalancerBandwidthRequestProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_GetBalancerBandwidthRequestProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_GetBalancerBandwidthResponseProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_GetBalancerBandwidthResponseProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_SubmitDiskBalancerPlanRequestProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_SubmitDiskBalancerPlanRequestProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_SubmitDiskBalancerPlanResponseProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_SubmitDiskBalancerPlanResponseProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_CancelPlanRequestProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_CancelPlanRequestProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_CancelPlanResponseProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_CancelPlanResponseProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_QueryPlanStatusRequestProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_QueryPlanStatusRequestProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_QueryPlanStatusResponseProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_QueryPlanStatusResponseProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_DiskBalancerSettingRequestProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_DiskBalancerSettingRequestProto_fieldAccessorTable;
private static final org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
internal_static_hadoop_hdfs_DiskBalancerSettingResponseProto_descriptor;
private static final
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hadoop_hdfs_DiskBalancerSettingResponseProto_fieldAccessorTable;
public static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
descriptor;
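// Static initializer: the string literals in descriptorData are the serialized
// FileDescriptorProto of ClientDatanodeProtocol.proto, split into short chunks. They are
// parsed by internalBuildGeneratedFileFrom together with the descriptors of the imported
// Security.proto, hdfs.proto and ReconfigurationProtocol.proto; the resulting FileDescriptor
// is assigned to the "descriptor" field through the InternalDescriptorAssigner callback, and
// each message's descriptor and field accessor table is then resolved by index.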
static {
java.lang.String[] descriptorData = {
"\n\034ClientDatanodeProtocol.proto\022\013hadoop.h" +
"dfs\032\016Security.proto\032\nhdfs.proto\032\035Reconfi" +
"gurationProtocol.proto\"U\n#GetReplicaVisi" +
"bleLengthRequestProto\022.\n\005block\030\001 \002(\0132\037.h" +
"adoop.hdfs.ExtendedBlockProto\"6\n$GetRepl" +
"icaVisibleLengthResponseProto\022\016\n\006length\030" +
"\001 \002(\004\"\036\n\034RefreshNamenodesRequestProto\"\037\n" +
"\035RefreshNamenodesResponseProto\"?\n\033Delete" +
"BlockPoolRequestProto\022\021\n\tblockPool\030\001 \002(\t" +
"\022\r\n\005force\030\002 \002(\010\"\036\n\034DeleteBlockPoolRespon" +
"seProto\"}\n!GetBlockLocalPathInfoRequestP" +
"roto\022.\n\005block\030\001 \002(\0132\037.hadoop.hdfs.Extend" +
"edBlockProto\022(\n\005token\030\002 \002(\0132\031.hadoop.org.apache.hadoop.shaded.com. +
"mon.TokenProto\"~\n\"GetBlockLocalPathInfoR" +
"esponseProto\022.\n\005block\030\001 \002(\0132\037.hadoop.hdf" +
"s.ExtendedBlockProto\022\021\n\tlocalPath\030\002 \002(\t\022" +
"\025\n\rlocalMetaPath\030\003 \002(\t\"2\n\034ShutdownDatano" +
"deRequestProto\022\022\n\nforUpgrade\030\001 \002(\010\"\037\n\035Sh" +
"utdownDatanodeResponseProto\"\032\n\030EvictWrit" +
"ersRequestProto\"\033\n\031EvictWritersResponseP" +
"roto\"\035\n\033GetDatanodeInfoRequestProto\"V\n\034G" +
"etDatanodeInfoResponseProto\0226\n\tlocalInfo" +
"\030\001 \002(\0132#.hadoop.hdfs.DatanodeLocalInfoPr" +
"oto\"\035\n\033GetVolumeReportRequestProto\"X\n\034Ge" +
"tVolumeReportResponseProto\0228\n\nvolumeInfo" +
"\030\001 \003(\0132$.hadoop.hdfs.DatanodeVolumeInfoP" +
"roto\"H\n\036TriggerBlockReportRequestProto\022\023" +
"\n\013incremental\030\001 \002(\010\022\021\n\tnnAddress\030\002 \001(\t\"!" +
"\n\037TriggerBlockReportResponseProto\"\"\n Get" +
"BalancerBandwidthRequestProto\"6\n!GetBala" +
"ncerBandwidthResponseProto\022\021\n\tbandwidth\030" +
"\001 \002(\004\"\202\001\n\"SubmitDiskBalancerPlanRequestP" +
"roto\022\016\n\006planID\030\001 \002(\t\022\014\n\004plan\030\002 \002(\t\022\023\n\013pl" +
"anVersion\030\003 \001(\004\022\027\n\017ignoreDateCheck\030\004 \001(\010" +
"\022\020\n\010planFile\030\005 \002(\t\"%\n#SubmitDiskBalancer" +
"PlanResponseProto\"(\n\026CancelPlanRequestPr" +
"oto\022\016\n\006planID\030\001 \002(\t\"\031\n\027CancelPlanRespons" +
"eProto\"\035\n\033QueryPlanStatusRequestProto\"g\n" +
"\034QueryPlanStatusResponseProto\022\016\n\006result\030" +
"\001 \001(\r\022\016\n\006planID\030\002 \001(\t\022\025\n\rcurrentStatus\030\003" +
" \001(\t\022\020\n\010planFile\030\004 \001(\t\".\n\037DiskBalancerSe" +
"ttingRequestProto\022\013\n\003key\030\001 \002(\t\"1\n DiskBa" +
"lancerSettingResponseProto\022\r\n\005value\030\001 \002(" +
"\t2\300\017\n\035ClientDatanodeProtocolService\022~\n\027g" +
"etReplicaVisibleLength\0220.hadoop.hdfs.Get" +
"ReplicaVisibleLengthRequestProto\0321.hadoo" +
"p.hdfs.GetReplicaVisibleLengthResponsePr" +
"oto\022i\n\020refreshNamenodes\022).hadoop.hdfs.Re" +
"freshNamenodesRequestProto\032*.hadoop.hdfs" +
".RefreshNamenodesResponseProto\022f\n\017delete" +
"BlockPool\022(.hadoop.hdfs.DeleteBlockPoolR" +
"equestProto\032).hadoop.hdfs.DeleteBlockPoo" +
"lResponseProto\022x\n\025getBlockLocalPathInfo\022" +
"..hadoop.hdfs.GetBlockLocalPathInfoReque" +
"stProto\032/.hadoop.hdfs.GetBlockLocalPathI" +
"nfoResponseProto\022i\n\020shutdownDatanode\022).h" +
"adoop.hdfs.ShutdownDatanodeRequestProto\032" +
"*.hadoop.hdfs.ShutdownDatanodeResponsePr" +
"oto\022]\n\014evictWriters\022%.hadoop.hdfs.EvictW" +
"ritersRequestProto\032&.hadoop.hdfs.EvictWr" +
"itersResponseProto\022f\n\017getDatanodeInfo\022(." +
"hadoop.hdfs.GetDatanodeInfoRequestProto\032" +
").hadoop.hdfs.GetDatanodeInfoResponsePro" +
"to\022f\n\017getVolumeReport\022(.hadoop.hdfs.GetV" +
"olumeReportRequestProto\032).hadoop.hdfs.Ge" +
"tVolumeReportResponseProto\022\201\001\n\030getReconf" +
"igurationStatus\0221.hadoop.hdfs.GetReconfi" +
"gurationStatusRequestProto\0322.hadoop.hdfs" +
".GetReconfigurationStatusResponseProto\022u" +
"\n\024startReconfiguration\022-.hadoop.hdfs.Sta" +
"rtReconfigurationRequestProto\032..hadoop.h" +
"dfs.StartReconfigurationResponseProto\022\215\001" +
"\n\034listReconfigurableProperties\0225.hadoop." +
"hdfs.ListReconfigurablePropertiesRequest" +
"Proto\0326.hadoop.hdfs.ListReconfigurablePr" +
"opertiesResponseProto\022o\n\022triggerBlockRep" +
"ort\022+.hadoop.hdfs.TriggerBlockReportRequ" +
"estProto\032,.hadoop.hdfs.TriggerBlockRepor" +
"tResponseProto\022u\n\024getBalancerBandwidth\022-" +
".hadoop.hdfs.GetBalancerBandwidthRequest" +
"Proto\032..hadoop.hdfs.GetBalancerBandwidth" +
"ResponseProto\022{\n\026submitDiskBalancerPlan\022" +
"/.hadoop.hdfs.SubmitDiskBalancerPlanRequ" +
"estProto\0320.hadoop.hdfs.SubmitDiskBalance" +
"rPlanResponseProto\022c\n\026cancelDiskBalancer" +
"Plan\022#.hadoop.hdfs.CancelPlanRequestProt" +
"o\032$.hadoop.hdfs.CancelPlanResponseProto\022" +
"l\n\025queryDiskBalancerPlan\022(.hadoop.hdfs.Q" +
"ueryPlanStatusRequestProto\032).hadoop.hdfs" +
".QueryPlanStatusResponseProto\022u\n\026getDisk" +
"BalancerSetting\022,.hadoop.hdfs.DiskBalanc" +
"erSettingRequestProto\032-.hadoop.hdfs.Disk" +
"BalancerSettingResponseProtoBK\n%org.apache.hadoop.shaded.org.apac" +
"he.hadoop.hdfs.protocol.protoB\034ClientDat" +
"anodeProtocolProtos\210\001\001\240\001\001"
};
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
public org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry assignDescriptors(
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
return null;
}
};
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor[] {
org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.getDescriptor(),
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.getDescriptor(),
}, assigner);
internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_descriptor =
getDescriptor().getMessageTypes().get(0);
internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_GetReplicaVisibleLengthRequestProto_descriptor,
new java.lang.String[] { "Block", });
internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_descriptor =
getDescriptor().getMessageTypes().get(1);
internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_GetReplicaVisibleLengthResponseProto_descriptor,
new java.lang.String[] { "Length", });
internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_descriptor =
getDescriptor().getMessageTypes().get(2);
internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_RefreshNamenodesRequestProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_descriptor =
getDescriptor().getMessageTypes().get(3);
internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_RefreshNamenodesResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_descriptor =
getDescriptor().getMessageTypes().get(4);
internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_DeleteBlockPoolRequestProto_descriptor,
new java.lang.String[] { "BlockPool", "Force", });
internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_descriptor =
getDescriptor().getMessageTypes().get(5);
internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_DeleteBlockPoolResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_descriptor =
getDescriptor().getMessageTypes().get(6);
internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_GetBlockLocalPathInfoRequestProto_descriptor,
new java.lang.String[] { "Block", "Token", });
internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_descriptor =
getDescriptor().getMessageTypes().get(7);
internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_GetBlockLocalPathInfoResponseProto_descriptor,
new java.lang.String[] { "Block", "LocalPath", "LocalMetaPath", });
internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_descriptor =
getDescriptor().getMessageTypes().get(8);
internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_ShutdownDatanodeRequestProto_descriptor,
new java.lang.String[] { "ForUpgrade", });
internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_descriptor =
getDescriptor().getMessageTypes().get(9);
internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_ShutdownDatanodeResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_EvictWritersRequestProto_descriptor =
getDescriptor().getMessageTypes().get(10);
internal_static_hadoop_hdfs_EvictWritersRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_EvictWritersRequestProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_EvictWritersResponseProto_descriptor =
getDescriptor().getMessageTypes().get(11);
internal_static_hadoop_hdfs_EvictWritersResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_EvictWritersResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_descriptor =
getDescriptor().getMessageTypes().get(12);
internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_GetDatanodeInfoRequestProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_descriptor =
getDescriptor().getMessageTypes().get(13);
internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_GetDatanodeInfoResponseProto_descriptor,
new java.lang.String[] { "LocalInfo", });
internal_static_hadoop_hdfs_GetVolumeReportRequestProto_descriptor =
getDescriptor().getMessageTypes().get(14);
internal_static_hadoop_hdfs_GetVolumeReportRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_GetVolumeReportRequestProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_GetVolumeReportResponseProto_descriptor =
getDescriptor().getMessageTypes().get(15);
internal_static_hadoop_hdfs_GetVolumeReportResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_GetVolumeReportResponseProto_descriptor,
new java.lang.String[] { "VolumeInfo", });
internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_descriptor =
getDescriptor().getMessageTypes().get(16);
internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_TriggerBlockReportRequestProto_descriptor,
new java.lang.String[] { "Incremental", "NnAddress", });
internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_descriptor =
getDescriptor().getMessageTypes().get(17);
internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_TriggerBlockReportResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_GetBalancerBandwidthRequestProto_descriptor =
getDescriptor().getMessageTypes().get(18);
internal_static_hadoop_hdfs_GetBalancerBandwidthRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_GetBalancerBandwidthRequestProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_GetBalancerBandwidthResponseProto_descriptor =
getDescriptor().getMessageTypes().get(19);
internal_static_hadoop_hdfs_GetBalancerBandwidthResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_GetBalancerBandwidthResponseProto_descriptor,
new java.lang.String[] { "Bandwidth", });
internal_static_hadoop_hdfs_SubmitDiskBalancerPlanRequestProto_descriptor =
getDescriptor().getMessageTypes().get(20);
internal_static_hadoop_hdfs_SubmitDiskBalancerPlanRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_SubmitDiskBalancerPlanRequestProto_descriptor,
new java.lang.String[] { "PlanID", "Plan", "PlanVersion", "IgnoreDateCheck", "PlanFile", });
internal_static_hadoop_hdfs_SubmitDiskBalancerPlanResponseProto_descriptor =
getDescriptor().getMessageTypes().get(21);
internal_static_hadoop_hdfs_SubmitDiskBalancerPlanResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_SubmitDiskBalancerPlanResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_CancelPlanRequestProto_descriptor =
getDescriptor().getMessageTypes().get(22);
internal_static_hadoop_hdfs_CancelPlanRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_CancelPlanRequestProto_descriptor,
new java.lang.String[] { "PlanID", });
internal_static_hadoop_hdfs_CancelPlanResponseProto_descriptor =
getDescriptor().getMessageTypes().get(23);
internal_static_hadoop_hdfs_CancelPlanResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_CancelPlanResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_QueryPlanStatusRequestProto_descriptor =
getDescriptor().getMessageTypes().get(24);
internal_static_hadoop_hdfs_QueryPlanStatusRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_QueryPlanStatusRequestProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_hdfs_QueryPlanStatusResponseProto_descriptor =
getDescriptor().getMessageTypes().get(25);
internal_static_hadoop_hdfs_QueryPlanStatusResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_QueryPlanStatusResponseProto_descriptor,
new java.lang.String[] { "Result", "PlanID", "CurrentStatus", "PlanFile", });
internal_static_hadoop_hdfs_DiskBalancerSettingRequestProto_descriptor =
getDescriptor().getMessageTypes().get(26);
internal_static_hadoop_hdfs_DiskBalancerSettingRequestProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_DiskBalancerSettingRequestProto_descriptor,
new java.lang.String[] { "Key", });
internal_static_hadoop_hdfs_DiskBalancerSettingResponseProto_descriptor =
getDescriptor().getMessageTypes().get(27);
internal_static_hadoop_hdfs_DiskBalancerSettingResponseProto_fieldAccessorTable = new
org.apache.hadoop.shaded.org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hadoop_hdfs_DiskBalancerSettingResponseProto_descriptor,
new java.lang.String[] { "Value", });
org.apache.hadoop.shaded.org.apache.hadoop.security.proto.SecurityProtos.getDescriptor();
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor();
org.apache.hadoop.shaded.org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.getDescriptor();
}
// @@protoc_insertion_point(outer_class_scope)
}